comment
stringlengths
1
5.49k
method_body
stringlengths
27
75.2k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
27
76k
context_before
stringlengths
8
252k
context_after
stringlengths
8
253k
Is it same? (system env vs. local property map)
void testAllowedHeadersFromSystemProperties() { Properties properties = new Properties(); properties.setProperty("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"); JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy(new JdkAsyncHttpClientBuilder()); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 3); }
properties.setProperty("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade");
void testAllowedHeadersFromSystemProperties() { Properties properties = new Properties(); properties.setProperty("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"); JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy(new JdkAsyncHttpClientBuilder()); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 3); }
class JdkAsyncHttpClientBuilderTests { private static final String PROXY_USERNAME = "foo"; private static final String PROXY_PASSWORD = "bar"; private static final String PROXY_USER_INFO = PROXY_USERNAME + ":" + PROXY_PASSWORD + "@"; private static final String SERVICE_ENDPOINT = "/default"; private static final ConfigurationSource EMPTY_SOURCE = new TestConfigurationSource(); /** * Tests that an {@link JdkAsyncHttpClient} is able to be built from an existing * {@link java.net.http.HttpClient.Builder}. */ @Test public void buildClientWithExistingClient() { final String[] marker = new String[1]; final java.net.http.HttpClient.Builder existingClientBuilder = java.net.http.HttpClient.newBuilder(); existingClientBuilder.executor(new Executor() { private final ExecutorService executorService = Executors.newFixedThreadPool(2); @Override public void execute(Runnable command) { marker[0] = "on_custom_executor"; executorService.submit(command); } }); final JdkAsyncHttpClient client = (JdkAsyncHttpClient) new JdkAsyncHttpClientBuilder(existingClientBuilder) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } assertNotNull(marker[0]); assertEquals(marker[0], "on_custom_executor"); } /** * Tests that instantiating an {@link JdkAsyncHttpClientBuilder} with a {@code null} {@link JdkAsyncHttpClient} * will throw a {@link NullPointerException}. 
*/ @Test public void startingWithNullClientThrows() { assertThrows(NullPointerException.class, () -> new JdkAsyncHttpClientBuilder(null)); } /** * Tests building a client with a given {@code Executor}. */ @Test public void buildWithExecutor() { final String[] marker = new String[1]; final HttpClient httpClient = new JdkAsyncHttpClientBuilder() .executor(new Executor() { private final ExecutorService executorService = Executors.newFixedThreadPool(10); @Override public void execute(Runnable command) { marker[0] = "on_custom_executor"; executorService.submit(command); } }) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } assertNotNull(marker[0]); assertEquals(marker[0], "on_custom_executor"); } /** * Tests that passing a {@code null} {@code executor} to the builder will throw a * {@link NullPointerException}. */ @Test public void nullExecutorThrows() { assertThrows(NullPointerException.class, () -> new JdkAsyncHttpClientBuilder().executor(null)); } /** * Tests building a client with a given proxy. 
*/ @Test public void buildWithHttpProxy() { final SimpleBasicAuthHttpProxyServer proxyServer = new SimpleBasicAuthHttpProxyServer(PROXY_USERNAME, PROXY_PASSWORD, new String[] {SERVICE_ENDPOINT}); try { SimpleBasicAuthHttpProxyServer.ProxyEndpoint proxyEndpoint = proxyServer.start(); ProxyOptions clientProxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(proxyEndpoint.getHost(), proxyEndpoint.getPort())) .setCredentials(PROXY_USERNAME, PROXY_PASSWORD); HttpClient httpClient = new JdkAsyncHttpClientBuilder(java.net.http.HttpClient.newBuilder()) .proxy(clientProxyOptions) .build(); final String serviceUrl = "http: StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, serviceUrl))) .expectNextCount(1) .verifyComplete(); } finally { proxyServer.shutdown(); } } @Test public void buildWithHttpProxyFromEnvConfiguration() { final SimpleBasicAuthHttpProxyServer proxyServer = new SimpleBasicAuthHttpProxyServer(PROXY_USERNAME, PROXY_PASSWORD, new String[] {SERVICE_ENDPOINT}); try { SimpleBasicAuthHttpProxyServer.ProxyEndpoint proxyEndpoint = proxyServer.start(); Configuration configuration = new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, new TestConfigurationSource() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put("java.net.useSystemProxies", "true")) .build(); configurationProxyTest(configuration); } finally { proxyServer.shutdown(); } } @Test public void buildWithHttpProxyFromExplicitConfiguration() { final SimpleBasicAuthHttpProxyServer proxyServer = new SimpleBasicAuthHttpProxyServer(PROXY_USERNAME, PROXY_PASSWORD, new String[] {SERVICE_ENDPOINT}); try { SimpleBasicAuthHttpProxyServer.ProxyEndpoint proxyEndpoint = proxyServer.start(); Configuration configuration = new ConfigurationBuilder() .putProperty("http.proxy.hostname", proxyEndpoint.getHost()) .putProperty("http.proxy.port", String.valueOf(proxyEndpoint.getPort())) .build(); configurationProxyTest(configuration); } finally { proxyServer.shutdown(); } } @Test public 
void buildWithConfigurationNone() { final HttpClient httpClient = new JdkAsyncHttpClientBuilder() .configuration(Configuration.NONE) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } } @ParameterizedTest @MethodSource("buildWithExplicitConfigurationProxySupplier") public void buildWithNonProxyConfigurationProxy(Configuration configuration) { final HttpClient httpClient = new JdkAsyncHttpClientBuilder() .configuration(configuration) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } } private static Stream<Arguments> buildWithExplicitConfigurationProxySupplier() { List<Arguments> arguments = new ArrayList<>(); final Configuration envConfiguration = new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, new TestConfigurationSource() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, "localhost")) .build(); arguments.add(Arguments.of(envConfiguration)); final Configuration explicitConfiguration = new ConfigurationBuilder() .putProperty("http.proxy.hostname", 
"localhost") .putProperty("http.proxy.port", "42") .putProperty("http.proxy.non-proxy-hosts", "localhost") .build(); arguments.add(Arguments.of(explicitConfiguration)); return arguments.stream(); } @Test void testAllowedHeadersFromNetworkProperties() { JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy(new JdkAsyncHttpClientBuilder()); Properties properties = new Properties(); properties.put("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 3); } @Test void testAllowedHeadersFromConfiguration() { Configuration configuration = new ConfigurationBuilder(EMPTY_SOURCE, new TestConfigurationSource().put("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"), EMPTY_SOURCE) .build(); JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy( new JdkAsyncHttpClientBuilder().configuration(configuration)); Properties properties = new Properties(); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 3); } @Test void testAllowedHeadersFromBoth() { Configuration configuration = new ConfigurationBuilder(new TestConfigurationSource(), new TestConfigurationSource().put("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"), new TestConfigurationSource()) .build(); 
JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy( new JdkAsyncHttpClientBuilder().configuration(configuration)); Properties properties = new Properties(); properties.put("jdk.httpclient.allowRestrictedHeaders", "host, connection, upgrade"); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "host", "connection", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 1); } @Test @Test void testCaseInsensitivity() { Properties properties = new Properties(); properties.setProperty("jdk.httpclient.allowRestrictedHeaders", "content-LENGTH"); JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy(new JdkAsyncHttpClientBuilder()); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> restrictedHeaders = jdkAsyncHttpClientBuilder.getRestrictedHeaders(); assertTrue(restrictedHeaders.contains("Connection"), "connection header is missing"); assertTrue(restrictedHeaders.contains("connection"), "connection header is missing"); assertTrue(restrictedHeaders.contains("CONNECTION"), "connection header is missing"); assertFalse(restrictedHeaders.contains("Content-Length"), "content-length not removed"); assertFalse(restrictedHeaders.contains("content-length"), "content-length not removed"); assertFalse(restrictedHeaders.contains("CONTENT-length"), "content-length not removed"); } private static void configurationProxyTest(Configuration configuration) { HttpClient httpClient = new JdkAsyncHttpClientBuilder(java.net.http.HttpClient.newBuilder()) .configuration(configuration) .build(); final String serviceUrl = "http: StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, serviceUrl))) .expectNextCount(1) 
.verifyComplete(); } private void validateRestrictedHeaders(JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder, Set<String> expectedRestrictedHeaders, int expectedRestrictedHeadersSize) { Set<String> restrictedHeaders = jdkAsyncHttpClientBuilder.getRestrictedHeaders(); assertEquals(expectedRestrictedHeadersSize, restrictedHeaders.size()); assertEquals(expectedRestrictedHeaders, restrictedHeaders); } }
class JdkAsyncHttpClientBuilderTests { private static final String PROXY_USERNAME = "foo"; private static final String PROXY_PASSWORD = "bar"; private static final String PROXY_USER_INFO = PROXY_USERNAME + ":" + PROXY_PASSWORD + "@"; private static final String SERVICE_ENDPOINT = "/default"; private static final ConfigurationSource EMPTY_SOURCE = new TestConfigurationSource(); /** * Tests that an {@link JdkAsyncHttpClient} is able to be built from an existing * {@link java.net.http.HttpClient.Builder}. */ @Test public void buildClientWithExistingClient() { final String[] marker = new String[1]; final java.net.http.HttpClient.Builder existingClientBuilder = java.net.http.HttpClient.newBuilder(); existingClientBuilder.executor(new Executor() { private final ExecutorService executorService = Executors.newFixedThreadPool(2); @Override public void execute(Runnable command) { marker[0] = "on_custom_executor"; executorService.submit(command); } }); final JdkAsyncHttpClient client = (JdkAsyncHttpClient) new JdkAsyncHttpClientBuilder(existingClientBuilder) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } assertNotNull(marker[0]); assertEquals(marker[0], "on_custom_executor"); } /** * Tests that instantiating an {@link JdkAsyncHttpClientBuilder} with a {@code null} {@link JdkAsyncHttpClient} * will throw a {@link NullPointerException}. 
*/ @Test public void startingWithNullClientThrows() { assertThrows(NullPointerException.class, () -> new JdkAsyncHttpClientBuilder(null)); } /** * Tests building a client with a given {@code Executor}. */ @Test public void buildWithExecutor() { final String[] marker = new String[1]; final HttpClient httpClient = new JdkAsyncHttpClientBuilder() .executor(new Executor() { private final ExecutorService executorService = Executors.newFixedThreadPool(10); @Override public void execute(Runnable command) { marker[0] = "on_custom_executor"; executorService.submit(command); } }) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } assertNotNull(marker[0]); assertEquals(marker[0], "on_custom_executor"); } /** * Tests that passing a {@code null} {@code executor} to the builder will throw a * {@link NullPointerException}. */ @Test public void nullExecutorThrows() { assertThrows(NullPointerException.class, () -> new JdkAsyncHttpClientBuilder().executor(null)); } /** * Tests building a client with a given proxy. 
*/ @Test public void buildWithHttpProxy() { final SimpleBasicAuthHttpProxyServer proxyServer = new SimpleBasicAuthHttpProxyServer(PROXY_USERNAME, PROXY_PASSWORD, new String[] {SERVICE_ENDPOINT}); try { SimpleBasicAuthHttpProxyServer.ProxyEndpoint proxyEndpoint = proxyServer.start(); ProxyOptions clientProxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(proxyEndpoint.getHost(), proxyEndpoint.getPort())) .setCredentials(PROXY_USERNAME, PROXY_PASSWORD); HttpClient httpClient = new JdkAsyncHttpClientBuilder(java.net.http.HttpClient.newBuilder()) .proxy(clientProxyOptions) .build(); final String serviceUrl = "http: StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, serviceUrl))) .expectNextCount(1) .verifyComplete(); } finally { proxyServer.shutdown(); } } @Test public void buildWithHttpProxyFromEnvConfiguration() { final SimpleBasicAuthHttpProxyServer proxyServer = new SimpleBasicAuthHttpProxyServer(PROXY_USERNAME, PROXY_PASSWORD, new String[] {SERVICE_ENDPOINT}); try { SimpleBasicAuthHttpProxyServer.ProxyEndpoint proxyEndpoint = proxyServer.start(); Configuration configuration = new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, new TestConfigurationSource() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put("java.net.useSystemProxies", "true")) .build(); configurationProxyTest(configuration); } finally { proxyServer.shutdown(); } } @Test public void buildWithHttpProxyFromExplicitConfiguration() { final SimpleBasicAuthHttpProxyServer proxyServer = new SimpleBasicAuthHttpProxyServer(PROXY_USERNAME, PROXY_PASSWORD, new String[] {SERVICE_ENDPOINT}); try { SimpleBasicAuthHttpProxyServer.ProxyEndpoint proxyEndpoint = proxyServer.start(); Configuration configuration = new ConfigurationBuilder() .putProperty("http.proxy.hostname", proxyEndpoint.getHost()) .putProperty("http.proxy.port", String.valueOf(proxyEndpoint.getPort())) .build(); configurationProxyTest(configuration); } finally { proxyServer.shutdown(); } } @Test public 
void buildWithConfigurationNone() { final HttpClient httpClient = new JdkAsyncHttpClientBuilder() .configuration(Configuration.NONE) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } } @ParameterizedTest @MethodSource("buildWithExplicitConfigurationProxySupplier") public void buildWithNonProxyConfigurationProxy(Configuration configuration) { final HttpClient httpClient = new JdkAsyncHttpClientBuilder() .configuration(configuration) .build(); final String defaultPath = "/default"; final WireMockServer server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(defaultPath).willReturn(WireMock.aResponse().withStatus(200))); server.start(); final String defaultUrl = "http: try { StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } finally { if (server.isRunning()) { server.shutdown(); } } } private static Stream<Arguments> buildWithExplicitConfigurationProxySupplier() { List<Arguments> arguments = new ArrayList<>(); final Configuration envConfiguration = new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, new TestConfigurationSource() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, "localhost")) .build(); arguments.add(Arguments.of(envConfiguration)); final Configuration explicitConfiguration = new ConfigurationBuilder() .putProperty("http.proxy.hostname", 
"localhost") .putProperty("http.proxy.port", "42") .putProperty("http.proxy.non-proxy-hosts", "localhost") .build(); arguments.add(Arguments.of(explicitConfiguration)); return arguments.stream(); } @Test void testAllowedHeadersFromNetworkProperties() { JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy(new JdkAsyncHttpClientBuilder()); Properties properties = new Properties(); properties.put("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 3); } @Test void testAllowedHeadersFromConfiguration() { Configuration configuration = new ConfigurationBuilder(EMPTY_SOURCE, new TestConfigurationSource().put("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"), EMPTY_SOURCE) .build(); JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy( new JdkAsyncHttpClientBuilder().configuration(configuration)); Properties properties = new Properties(); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 3); } @Test void testAllowedHeadersFromBoth() { Configuration configuration = new ConfigurationBuilder(new TestConfigurationSource(), new TestConfigurationSource().put("jdk.httpclient.allowRestrictedHeaders", "content-length, upgrade"), new TestConfigurationSource()) .build(); 
JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy( new JdkAsyncHttpClientBuilder().configuration(configuration)); Properties properties = new Properties(); properties.put("jdk.httpclient.allowRestrictedHeaders", "host, connection, upgrade"); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> expectedRestrictedHeaders = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); expectedRestrictedHeaders.addAll(JdkAsyncHttpClientBuilder.DEFAULT_RESTRICTED_HEADERS); expectedRestrictedHeaders.removeAll(Arrays.asList("content-length", "host", "connection", "upgrade")); validateRestrictedHeaders(jdkAsyncHttpClientBuilder, expectedRestrictedHeaders, 1); } @Test @Test void testCaseInsensitivity() { Properties properties = new Properties(); properties.setProperty("jdk.httpclient.allowRestrictedHeaders", "content-LENGTH"); JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder = spy(new JdkAsyncHttpClientBuilder()); when(jdkAsyncHttpClientBuilder.getNetworkProperties()).thenReturn(properties); Set<String> restrictedHeaders = jdkAsyncHttpClientBuilder.getRestrictedHeaders(); assertTrue(restrictedHeaders.contains("Connection"), "connection header is missing"); assertTrue(restrictedHeaders.contains("connection"), "connection header is missing"); assertTrue(restrictedHeaders.contains("CONNECTION"), "connection header is missing"); assertFalse(restrictedHeaders.contains("Content-Length"), "content-length not removed"); assertFalse(restrictedHeaders.contains("content-length"), "content-length not removed"); assertFalse(restrictedHeaders.contains("CONTENT-length"), "content-length not removed"); } private static void configurationProxyTest(Configuration configuration) { HttpClient httpClient = new JdkAsyncHttpClientBuilder(java.net.http.HttpClient.newBuilder()) .configuration(configuration) .build(); final String serviceUrl = "http: StepVerifier.create(httpClient.send(new HttpRequest(HttpMethod.GET, serviceUrl))) .expectNextCount(1) 
.verifyComplete(); } private void validateRestrictedHeaders(JdkAsyncHttpClientBuilder jdkAsyncHttpClientBuilder, Set<String> expectedRestrictedHeaders, int expectedRestrictedHeadersSize) { Set<String> restrictedHeaders = jdkAsyncHttpClientBuilder.getRestrictedHeaders(); assertEquals(expectedRestrictedHeadersSize, restrictedHeaders.size()); assertEquals(expectedRestrictedHeaders, restrictedHeaders); } }
Opposite comment from the above, this doesn't seem to need to call into `setBody(BinaryData)` as it won't set the Content-Length (unless `FluxByteBufferContent` is eagerly reading the `Flux<ByteBuffer>`).
public HttpRequest setBody(Flux<ByteBuffer> content) { if (content != null) { setBody(BinaryDataHelper.createBinaryData(new FluxByteBufferContent(content))); } else { this.body = null; } return this; }
setBody(BinaryDataHelper.createBinaryData(new FluxByteBufferContent(content)));
public HttpRequest setBody(Flux<ByteBuffer> content) { if (content != null) { this.body = BinaryDataHelper.createBinaryData(new FluxByteBufferContent(content)); } else { this.body = null; } return this; }
class HttpRequest { private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class); private HttpMethod httpMethod; private URL url; private HttpHeaders headers; private BinaryData body; /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to */ public HttpRequest(HttpMethod httpMethod, URL url) { this(httpMethod, url, new HttpHeaders(), (BinaryData) null); } /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to * @throws IllegalArgumentException if {@code url} is null or it cannot be parsed into a valid URL. */ public HttpRequest(HttpMethod httpMethod, String url) { this.httpMethod = httpMethod; setUrl(url); this.headers = new HttpHeaders(); } /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to * @param headers the HTTP headers to use with this request * @param body the request content */ public HttpRequest(HttpMethod httpMethod, URL url, HttpHeaders headers, Flux<ByteBuffer> body) { this.httpMethod = httpMethod; this.url = url; this.headers = headers; setBody(BinaryDataHelper.createBinaryData(new FluxByteBufferContent(body))); } /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to * @param headers the HTTP headers to use with this request * @param body the request content */ public HttpRequest(HttpMethod httpMethod, URL url, HttpHeaders headers, BinaryData body) { this.httpMethod = httpMethod; this.url = url; this.headers = headers; setBody(body); } /** * Get the request method. * * @return the request method */ public HttpMethod getHttpMethod() { return httpMethod; } /** * Set the request method. 
* * @param httpMethod the request method * @return this HttpRequest */ public HttpRequest setHttpMethod(HttpMethod httpMethod) { this.httpMethod = httpMethod; return this; } /** * Get the target address. * * @return the target address */ public URL getUrl() { return url; } /** * Set the target address to send the request to. * * @param url target address as {@link URL} * @return this HttpRequest */ public HttpRequest setUrl(URL url) { this.url = url; return this; } /** * Set the target address to send the request to. * * @param url target address as a String * @return this HttpRequest * @throws IllegalArgumentException if {@code url} is null or it cannot be parsed into a valid URL. */ public HttpRequest setUrl(String url) { try { this.url = new URL(url); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL.", ex)); } return this; } /** * Get the request headers. * * @return headers to be sent */ public HttpHeaders getHeaders() { return headers; } /** * Set the request headers. * * @param headers the set of headers * @return this HttpRequest */ public HttpRequest setHeaders(HttpHeaders headers) { this.headers = headers; return this; } /** * Set a request header, replacing any existing value. A null for {@code value} will remove the header if one with * matching name exists. * * @param name the header name * @param value the header value * @return this HttpRequest */ public HttpRequest setHeader(String name, String value) { headers.set(name, value); return this; } /** * Get the request content. * * @return the content to be send */ public Flux<ByteBuffer> getBody() { return body == null ? null : body.toFluxByteBuffer(); } /** * Get the request content. * * @return the content to be send */ public BinaryData getBodyAsBinaryData() { return body; } /** * Set the request content. * <p> * The Content-Length header will be set based on the given content's length. 
* * @param content the request content * @return this HttpRequest */ public HttpRequest setBody(String content) { this.body = BinaryData.fromString(content); setContentLength(this.body.getLength()); return this; } /** * Set the request content. * <p> * The Content-Length header will be set based on the given content's length. * * @param content the request content * @return this HttpRequest */ public HttpRequest setBody(byte[] content) { setContentLength(content.length); this.body = BinaryData.fromBytes(content); return this; } /** * Set request content. * <p> * Caller must set the Content-Length header to indicate the length of the content, or use Transfer-Encoding: * chunked. * * @param content the request content * @return this HttpRequest */ /** * Set request content. * <p> * If provided content has known length, i.e. {@link BinaryData * Content-Length header is updated. Otherwise, * if provided content has unknown length, i.e. {@link BinaryData * the caller must set the Content-Length header to indicate the length of the content, or use Transfer-Encoding: * chunked. * * @param content the request content * @return this HttpRequest */ public HttpRequest setBody(BinaryData content) { this.body = content; if (content != null && content.getLength() != null) { setContentLength(content.getLength()); } return this; } private void setContentLength(long contentLength) { headers.set("Content-Length", String.valueOf(contentLength)); } /** * Creates a copy of the request. * * The main purpose of this is so that this HttpRequest can be changed and the resulting HttpRequest can be a * backup. This means that the cloned HttpHeaders and body must not be able to change from side effects of this * HttpRequest. * * @return a new HTTP request instance with cloned instances of all mutable properties. */ public HttpRequest copy() { final HttpHeaders bufferedHeaders = new HttpHeaders(headers); return new HttpRequest(httpMethod, url, bufferedHeaders, body); } }
class HttpRequest { private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class); private HttpMethod httpMethod; private URL url; private HttpHeaders headers; private BinaryData body; /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to */ public HttpRequest(HttpMethod httpMethod, URL url) { this(httpMethod, url, new HttpHeaders(), (BinaryData) null); } /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to * @throws IllegalArgumentException if {@code url} is null or it cannot be parsed into a valid URL. */ public HttpRequest(HttpMethod httpMethod, String url) { this.httpMethod = httpMethod; setUrl(url); this.headers = new HttpHeaders(); } /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to * @param headers the HTTP headers to use with this request */ public HttpRequest(HttpMethod httpMethod, URL url, HttpHeaders headers) { this.httpMethod = httpMethod; this.url = url; this.headers = headers; } /** * Create a new HttpRequest instance. * * @param httpMethod the HTTP request method * @param url the target address to send the request to * @param headers the HTTP headers to use with this request * @param body the request content */ public HttpRequest(HttpMethod httpMethod, URL url, HttpHeaders headers, Flux<ByteBuffer> body) { this.httpMethod = httpMethod; this.url = url; this.headers = headers; setBody(BinaryDataHelper.createBinaryData(new FluxByteBufferContent(body))); } /** * Create a new HttpRequest instance. 
* * @param httpMethod the HTTP request method * @param url the target address to send the request to * @param headers the HTTP headers to use with this request * @param body the request content */ public HttpRequest(HttpMethod httpMethod, URL url, HttpHeaders headers, BinaryData body) { this.httpMethod = httpMethod; this.url = url; this.headers = headers; setBody(body); } /** * Get the request method. * * @return the request method */ public HttpMethod getHttpMethod() { return httpMethod; } /** * Set the request method. * * @param httpMethod the request method * @return this HttpRequest */ public HttpRequest setHttpMethod(HttpMethod httpMethod) { this.httpMethod = httpMethod; return this; } /** * Get the target address. * * @return the target address */ public URL getUrl() { return url; } /** * Set the target address to send the request to. * * @param url target address as {@link URL} * @return this HttpRequest */ public HttpRequest setUrl(URL url) { this.url = url; return this; } /** * Set the target address to send the request to. * * @param url target address as a String * @return this HttpRequest * @throws IllegalArgumentException if {@code url} is null or it cannot be parsed into a valid URL. */ public HttpRequest setUrl(String url) { try { this.url = new URL(url); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL.", ex)); } return this; } /** * Get the request headers. * * @return headers to be sent */ public HttpHeaders getHeaders() { return headers; } /** * Set the request headers. * * @param headers the set of headers * @return this HttpRequest */ public HttpRequest setHeaders(HttpHeaders headers) { this.headers = headers; return this; } /** * Set a request header, replacing any existing value. A null for {@code value} will remove the header if one with * matching name exists. 
* * @param name the header name * @param value the header value * @return this HttpRequest */ public HttpRequest setHeader(String name, String value) { headers.set(name, value); return this; } /** * Get the request content. * * @return the content to be sent */ public Flux<ByteBuffer> getBody() { return body == null ? null : body.toFluxByteBuffer(); } /** * Get the request content. * * @return the content to be sent */ public BinaryData getBodyAsBinaryData() { return body; } /** * Set the request content. * <p> * The Content-Length header will be set based on the given content's length. * * @param content the request content * @return this HttpRequest */ public HttpRequest setBody(String content) { return setBody(BinaryData.fromString(content)); } /** * Set the request content. * <p> * The Content-Length header will be set based on the given content's length. * * @param content the request content * @return this HttpRequest */ public HttpRequest setBody(byte[] content) { return setBody(BinaryData.fromBytes(content)); } /** * Set request content. * <p> * Caller must set the Content-Length header to indicate the length of the content, or use Transfer-Encoding: * chunked. * * @param content the request content * @return this HttpRequest */ /** * Set request content. * <p> * If provided content has known length, i.e. {@link BinaryData * Content-Length header is updated. Otherwise, * if provided content has unknown length, i.e. {@link BinaryData * the caller must set the Content-Length header to indicate the length of the content, or use Transfer-Encoding: * chunked. * * @param content the request content * @return this HttpRequest */ public HttpRequest setBody(BinaryData content) { this.body = content; if (content != null && content.getLength() != null) { setContentLength(content.getLength()); } return this; } private void setContentLength(long contentLength) { headers.set("Content-Length", String.valueOf(contentLength)); } /** * Creates a copy of the request. 
* * The main purpose of this is so that this HttpRequest can be changed and the resulting HttpRequest can be a * backup. This means that the cloned HttpHeaders and body must not be able to change from side effects of this * HttpRequest. * * @return a new HTTP request instance with cloned instances of all mutable properties. */ public HttpRequest copy() { final HttpHeaders bufferedHeaders = new HttpHeaders(headers); return new HttpRequest(httpMethod, url, bufferedHeaders, body); } }
Wrapped within client retry policy, so can have retries when failed to get the addresses back from gateway. Does it make sense?
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(String containerLink) { checkArgument(StringUtils.isNotEmpty(containerLink), "Argument 'containerLink' should not be null nor empty"); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> this.storeModel.openConnectionsAndInitCaches(containerLink), retryPolicyInstance); }
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
public Flux<OpenConnectionResponse> openConnectionsAndInitCaches(String containerLink) { checkArgument(StringUtils.isNotEmpty(containerLink), "Argument 'containerLink' should not be null nor empty"); return this.storeModel.openConnectionsAndInitCaches(containerLink); }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver 
addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private ApiType apiType; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, 
contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), 
Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? 
partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; 
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); 
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)); this.apiType = apiType; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this, this.globalEndpointManager); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. 
More info: https: } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); } private void updateGatewayProxy() { ((RxGatewayStoreModel)this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); ((RxGatewayStoreModel)this.gatewayProxy).setCollectionCache(this.collectionCache); ((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); ((RxGatewayStoreModel)this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry(this, null, UUID.randomUUID().toString(), ManagementFactory.getRuntimeMXBean().getName(), 
userAgentContainer.getUserAgent(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, 
QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } 
@Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { 
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Reads the feed of all databases in the account.
     *
     * @param options the query request options.
     * @return a {@link Flux} emitting {@link FeedResponse} pages of {@link Database} resources.
     */
    @Override
    public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
        return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
    }

    // Builds the feed/query link for the given resource type under the parent link
    // (e.g. database link + DocumentCollection -> "<dbLink>/colls"). Root-scoped types
    // (Database, Offer) ignore the parent link and return the account-level root path.
    private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
        switch (resourceTypeEnum) {
            case Database:
                return Paths.DATABASES_ROOT;

            case DocumentCollection:
                return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);

            case Document:
                return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);

            case Offer:
                return Paths.OFFERS_ROOT;

            case User:
                return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);

            case ClientEncryptionKey:
                return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);

            case Permission:
                return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);

            case Attachment:
                return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);

            case StoredProcedure:
                return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);

            case Trigger:
                return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);

            case UserDefinedFunction:
                return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);

            case Conflict:
                return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);

            default:
                // Any resource type not handled above cannot be addressed as a feed.
                throw new IllegalArgumentException("resource type not supported");
        }
    }

    // Extracts the operation context / listener pair carried on query request options;
    // returns null when no options are supplied.
    private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
        if (options == null) {
            return null;
        }
        return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
    }

    // Overload for point-operation RequestOptions; returns null when no options are supplied.
    private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
        if (options == null) {
            return null;
        }
        return options.getOperationContextAndListenerTuple();
    }

    private <T>
Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID correlationActivityIdOfRequestOptions = ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .getCorrelationActivityId(options); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, correlationActivityId), invalidPartitionExceptionRetryPolicy); } private <T> Flux<FeedResponse<T>> createQueryInternal( String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> 
createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), 
getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = 
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink);

            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection,
                OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, DocumentCollection.class));
        } catch (Exception e) {
            // Argument validation failures (and any other synchronous error) are surfaced
            // through the returned Mono rather than thrown to the caller.
            logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Issues a DELETE through the resolved store proxy after populating the standard
    // request headers. When this send follows at least one retry, records the end time
    // on the retry context for diagnostics.
    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeaders(request, RequestVerb.DELETE)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null
                        && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // Sends a POST request that deletes all items sharing a partition key. Same
    // retry-context bookkeeping as delete(), but the store proxy is resolved up front
    // inside the lambda.
    private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request,
                                                                         DocumentClientRetryPolicy documentClientRetryPolicy,
                                                                         OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeaders(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
                if (documentClientRetryPolicy.getRetryContext() != null
                        && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
}

    // Issues a GET through the resolved store proxy after populating the standard
    // request headers. When this send follows at least one retry, records the end time
    // on the retry context for diagnostics.
    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request,
                                                 DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeaders(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null
                        && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // Issues a GET for a feed read; unlike read(), no retry-context bookkeeping is done here.
    Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return populateHeaders(request, RequestVerb.GET)
            .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
    }

    // Issues a POST query and captures the session token from the response before
    // handing the response back to the caller.
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        return populateHeaders(request, RequestVerb.POST)
            .flatMap(requestPopulated ->
                this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                    .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }));
    }

    /**
     * Reads a document collection by its link.
     *
     * @param collectionLink the link of the collection to read.
     * @param options        the request options.
     * @return a {@link Mono} emitting the read {@link DocumentCollection} response.
     */
    @Override
    public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                     RequestOptions options) {
        // A fresh retry policy instance per logical operation; the same instance is shared
        // between the retry driver and the request it decorates.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Core implementation of readCollection: validates the link, builds the Read request,
    // and dispatches it; synchronous failures are surfaced through the returned Mono.
    private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                              RequestOptions options,
                                                                              DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }

            logger.debug("Reading a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { 
stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, 
options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { 
autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null && options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { 
addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = 
extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } public static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { switch (partitionKeyDefinition.getKind()) { case HASH: String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } break; case MULTI_HASH: Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()]; for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){ String 
partitionPath = partitionKeyDefinition.getPaths().get(pathIter); List<String> partitionPathParts = PathParser.getPathParts(partitionPath); partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts); } return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false); default: throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind()); } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, 
String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { return 
// (continuation of populateHeaders) — feed-range filtering is required for this request:
// resolve the collection and the partition-key-range cache so the feed range can be
// translated into wire headers, then apply the authorization header to the populated request.
request.getFeedRange()
    .populateFeedRangeFilteringHeaders(
        this.getPartitionKeyRangeCache(),
        request,
        this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
    .flatMap(this::populateAuthorizationHeader);
}

// No feed-range filtering needed; only the authorization header remains to be applied.
return this.populateAuthorizationHeader(request);
}

/**
 * Decides whether feed-range headers must be computed before sending this request.
 * Only Document/Conflict feed reads and queries that actually carry a feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    // Feed ranges only apply to document and conflict resources.
    if (request.getResourceType() != ResourceType.Document &&
        request.getResourceType() != ResourceType.Conflict) {
        return false;
    }

    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            // Only filter when the caller actually supplied a feed range.
            return request.getFeedRange() != null;
        default:
            return false;
    }
}

/**
 * Applies the AUTHORIZATION header to the request when AAD-token auth is configured.
 * For other auth types the header was already set synchronously in populateHeaders,
 * so the request passes through unchanged.
 *
 * @param request the service request to decorate; must not be null.
 * @return a Mono emitting the same request instance with the header applied.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        // AAD tokens are fetched/refreshed asynchronously from the token cache.
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return request;
            });
    } else {
        return Mono.just(request);
    }
}

/**
 * Overload of {@link #populateAuthorizationHeader(RxDocumentServiceRequest)} that works
 * directly on an HttpHeaders collection (used for raw HTTP calls).
 *
 * @param httpHeaders the headers to decorate; must not be null.
 * @return a Mono emitting the same headers instance with AUTHORIZATION set when AAD auth is used.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return httpHeaders;
            });
    }
    return Mono.just(httpHeaders);
}

/** Returns the authorization scheme this client was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}

/**
 * Resolves the authorization token for a single request. The branches below (continued
 * on the following span) try, in precedence order: a user-supplied token resolver, a
 * key credential, a directly supplied master key / resource token, and finally the
 * per-resource token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
// (continuation of getUserAuthorizationToken) — a user-supplied token resolver takes
// precedence over every built-in credential source.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
    properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
    // Key credential: sign the request with the shared-key signature algorithm.
    return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
    // A single resource token was provided directly — use it verbatim.
    return masterKeyOrResourceToken;
} else {
    // Token-map mode: at least one of the preceding sources must exist, so the map cannot be null here.
    assert resourceTokensMap != null;
    if(resourceType.equals(ResourceType.DatabaseAccount)) {
        // Database-account reads fall back to the first token seen in the permission feed.
        return this.firstResourceTokenFromPermissionFeed;
    }

    return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}

/**
 * Maps a wire-level ResourceType to the public CosmosResourceType, defaulting to SYSTEM
 * for types that have no public equivalent.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType cosmosResourceType =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    if (cosmosResourceType == null) {
        return CosmosResourceType.SYSTEM;
    }
    return cosmosResourceType;
}

/** Records the session token from a response so session consistency can be honored on later calls. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}

/**
 * Sends a POST (create) request: populates headers asynchronously, then dispatches the
 * populated request through the store proxy. Retry end time is updated when this is a retry.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

            // Mark the end of the retry window on retried attempts for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest
request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, RequestVerb.PATCH); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(request).processMessage(request); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { 
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); } private 
Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return 
this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, 
cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = 
addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. 
documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
// Point-read goes through the document resource path with read-specific headers.
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
    OperationType.Read, ResourceType.Document, path, requestHeaders, options);

if (retryPolicyInstance != null) {
    retryPolicyInstance.onBeforeSendRequest(request);
}

// Resolve the collection first: partition-key headers depend on the collection's PK definition.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

return requestObs.flatMap(req -> {
    // NOTE(review): dispatches the outer 'request' rather than 'req'. They appear to be the
    // same mutated instance (addPartitionKeyInformation mutates in place), but confirm
    // before relying on it.
    return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
    logger.debug("Failure in reading a document due to [{}]", e.getMessage());
    return Mono.error(e);
}
}

/**
 * Reads all documents in a collection, implemented as a SELECT * query.
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, CosmosQueryRequestOptions options, Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return queryDocuments(collectionLink, "SELECT * FROM r", options, classOfT);
}

/**
 * Reads multiple items identified by (id, partition key) pairs in one logical call.
 * Resolves the collection, then (continued on the following span) groups the identities
 * by partition-key range and issues one query per range.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    CosmosQueryRequestOptions options,
    Class<T> klass) {

    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );

    // Collection metadata is needed to map each identity's PK to an effective partition key.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);

    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection =
documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page 
: feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = 
ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") 
.collect(Collectors.joining()); }

// Executes the read-many fan-out: builds a per-partition-key-range query
// execution context (one sub-query per range in rangeQueryMap) and flattens
// the per-range results into a single Flux of feed responses.
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); }

// Convenience overload: wraps the raw query string in a SqlQuerySpec and
// delegates to the SqlQuerySpec overload.
@Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, String query, CosmosQueryRequestOptions options, Class<T> classOfT) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options, classOfT); }

// Adapter exposing this client's caches, retry policies and consistency
// settings to the query pipeline. executeQueryAsync optionally notifies the
// supplied operation listener (request/response/exception callbacks) and
// stamps the correlated activity id header when a listener tuple is present.
// readFeedAsync is intentionally unimplemented here (returns null).
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; }

// Logs the query (via SqlQuerySpecLogger) then runs a standard document query.
@Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> classOfT) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, options, classOfT, ResourceType.Document); }

// Starts a change-feed query against the given collection; the heavy lifting
// lives in ChangeFeedQueryImpl.
@Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions, Class<T> classOfT) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, classOfT, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); }

// Reads all documents of a single logical partition by generating a
// partition-scan query and targeting the resolved partition-key range
// (continues below).
@Override public <T> Flux<FeedResponse<T>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { 
throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new IllegalArgumentException("partitionKey"); }
// Resolve the collection, derive the pk selector, build a logical-partition
// scan query, then look up the routing map to pin the query to the single
// partition-key range owning this partition key. Retries are wrapped in an
// InvalidPartitionExceptionRetryPolicy so stale collection info is refreshed.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), classOfT, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); }

// Exposes the client-level query-plan cache.
@Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; }

// Reads the partition-key-range feed of a collection.
@Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); }

// Validates arguments and assembles a stored-procedure service request for
// the given operation type.
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; }

// Validates arguments and assembles a user-defined-function service request
// (continues below).
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { 
throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; }

// Creates a stored procedure; the public method wires up the session-token
// reset retry policy and delegates to the internal implementation.
@Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); }

// Builds the Create request, notifies the retry policy, issues the call and
// maps the raw response; synchronous failures are converted to Mono.error.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Upserts a stored procedure (retry wiring as above).
@Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); }

// Internal upsert: same shape as create, with OperationType.Upsert.
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Replaces a stored procedure addressed by its self link.
@Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); }

// Internal replace: validates the resource and builds the request from the
// stored procedure's self link rather than a collection link.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Deletes a stored procedure by link.
@Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); }

// Internal delete by link.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Reads a stored procedure by link.
@Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); }

// Internal read by link.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Reads the stored-procedure feed of a collection.
@Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); }

// Query overloads: raw string delegates to SqlQuerySpec overload.
@Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); }

// Execute overloads: the two-arg form delegates with null options
// (continues below).
@Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy 
documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); }

// Executes a transactional batch against a collection with the standard
// session-token-reset retry policy.
@Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); }

// Executes a stored procedure: builds an ExecuteJavaScript request whose body
// is the serialized parameter list (empty string when no params), resolves
// partition-key info, then issues the call and captures the session token.
// NOTE(review): reqObs.flatMap(req -> create(request, ...)) ignores the
// lambda parameter 'req' and reuses 'request' — appears intentional if
// addPartitionKeyInformation mutates and returns the same request; confirm.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Builds the batch request document, sends it, and parses the service
// response into a CosmosBatchResponse (ensureSuccessStatusCode = true).
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } }

// Creates a trigger (retry wiring as elsewhere; continues below).
@Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private 
Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Upserts a trigger; delegates to the internal implementation.
@Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); }

// Internal upsert: same shape as create, with OperationType.Upsert.
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Validates arguments and assembles a trigger service request for the given
// operation type.
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; }

// Replaces a trigger addressed by its self link.
@Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); }

// Internal replace: builds the request from the trigger's self link.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Deletes a trigger by link.
@Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); }

// Internal delete by link.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Reads a trigger by link.
@Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); }

// Internal read by link.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Reads the trigger feed of a collection.
@Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); }

// Query overloads: raw string delegates to the SqlQuerySpec overload.
@Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); }

// Creates a user-defined function (continues below).
@Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction 
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Upserts a user-defined function; delegates to the internal implementation.
@Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); }

// Internal upsert: same shape as create, with OperationType.Upsert.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Replaces a user-defined function addressed by its self link.
@Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); }

// Internal replace: builds the request from the UDF's self link.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Deletes a user-defined function by link.
@Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); }

// Internal delete by link.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }

// Reads a user-defined function by link (internal implementation continues
// past the end of this chunk).
@Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        // Conflicts are partition-scoped; partition-key headers must be resolved before sending.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the lambda parameter 'req' is unused and the outer 'request' is used
            // instead — presumably addPartitionKeyInformation returns the same instance; confirm.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads all conflicts of a collection as a paged feed.
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.Conflict, Conflict.class,
        Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}

// String-query overload; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) {
    return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}

@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}

// Deletes the conflict addressed by conflictLink.
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        // Same partition-key resolution pattern as readConflictInternal.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Creates a user under the database identified by databaseLink.
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

// NOTE(review): span ends inside this method's log statement; the message text continues below.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId());
        // Build the Create request and execute it under the supplied retry policy.
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive error channel.
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Upserts a user under the database identified by databaseLink.
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Validates arguments and assembles a User request for the given operation type.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, path, user, requestHeaders, options);
    return request;
}

// Replaces an existing user, addressed by its self link.
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Deletes the user addressed by userLink.
// NOTE(review): unlike sibling methods, this lacks @Override — likely an oversight; confirm
// against the DocumentClient interface before adding it.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads a single user addressed by userLink.
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads all users of a database as a paged feed.
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}

// String-query overload; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}

@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}

// Reads a single client encryption key addressed by clientEncryptionKeyLink.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}

// NOTE(review): span ends mid-statement; the 'if' condition continues below.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if
(StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Creates a client encryption key under the database identified by databaseLink.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Validates arguments and assembles a ClientEncryptionKey request for the given operation type.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
    return request;
}

// Replaces a client encryption key, addressed by its name-based link.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options,
                                                                                       DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads all client encryption keys of a database as a paged feed.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
        Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}

// String-query overload; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query, CosmosQueryRequestOptions options) {
    return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options);
}

@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}

// NOTE(review): span ends mid-signature; the method name (createPermission) continues below.
@Override
public Mono<ResourceResponse<Permission>>
createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } 
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        // Offers carry no request options/headers; both are intentionally null here.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads a single offer addressed by offerLink.
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        // The cast disambiguates the overload taking a header map (no headers are sent).
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads all offers of the account as a paged feed.
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    return readFeed(options, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}

// Generic paged ReadFeed driver: builds per-page requests (propagating the continuation
// token and page size) and executes each page under a shared retry policy.
private <T> Flux<FeedResponse<T>> readFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    if (options == null) {
        options = new CosmosQueryRequestOptions();
    }
    // maxItemCount == null means "no limit"; Paginator interprets -1 as service-chosen page size.
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
        .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(
            response,
            ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .getItemFactoryMethod(finalCosmosQueryRequestOptions, klass),
            klass)), retryPolicy);
    return Paginator.getPaginatedQueryResultAsObservable(
        options, createRequestFunc, executeFunc, maxPageSize);
}

// String-query overload; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    return queryOffers(new SqlQuerySpec(query), options);
}

@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Offers are account-level: no parent resource link.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}

// Fetches the database account metadata from the service.
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
}

// NOTE(review): span ends mid-declaration; the body of getLatestDatabaseAccount continues below.
@Override
public DatabaseAccount getLatestDatabaseAccount()
{ return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); this.storeModel.enableThroughputControl(throughputControlStore); } this.throughputControlStore.enableThroughputControlGroup(group); } @Override private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new 
SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) { logger.debug("getFeedRange collectionLink=[{}]", collectionLink); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { 
request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } }
/**
 * Default implementation of {@link AsyncDocumentClient}: the low-level reactive client
 * over the Cosmos DB REST API, also serving as its own authorization-token provider
 * and CPU/memory listener.
 */
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
    DiagnosticsClientContext {

    // Process-wide pseudo machine id used in diagnostics.
    private static final String tempMachineId = "uuid:" + UUID.randomUUID();
    // Counts live client instances across the process (surfaced in diagnostics).
    private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
    // Monotonically increasing id source for new clients.
    private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
    // Effective-partition-key range covering the entire key space.
    private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
        PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
        PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey,
        true,
        false);

    private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
        "ParallelDocumentQueryExecutioncontext, but not used";

    private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
    private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
    private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
    private final String masterKeyOrResourceToken;
    private final URI serviceEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final ConsistencyLevel consistencyLevel;
    private final BaseAuthorizationTokenProvider authorizationTokenProvider;
    private final UserAgentContainer userAgentContainer;
    private final boolean hasAuthKeyResourceToken;
    private final Configs configs;
    private final boolean connectionSharingAcrossClientsEnabled;
    // Non-final: may be synthesized from a master key in the constructor.
    private AzureKeyCredential credential;
    private final TokenCredential tokenCredential;
    private String[] tokenCredentialScopes;
    private SimpleTokenCache tokenCredentialCache;
    private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
    AuthorizationTokenType authorizationTokenType;
    private SessionContainer sessionContainer;
    private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
    private RxClientCollectionCache collectionCache;
    private RxStoreModel gatewayProxy;
    private RxStoreModel storeModel;
    private GlobalAddressResolver addressResolver;
    private RxPartitionKeyRangeCache partitionKeyRangeCache;
    // Resource-token auth: rid/name -> (partition key, token) pairs from the permission feed.
    private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
    private final boolean contentResponseOnWriteEnabled;
    private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;

    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final int clientId;
    private ClientTelemetry clientTelemetry;
    private ApiType apiType;
    private IRetryPolicyFactory resetSessionTokenRetryPolicy;
    /**
     * Compatibility mode: Allows to specify compatibility mode used by client when
     * making query requests. Should be removed when application/sql is no longer
     * supported.
     */
    private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
    private final GlobalEndpointManager globalEndpointManager;
    private final RetryPolicy retryPolicy;
    private HttpClient reactorHttpClient;
    private Function<HttpClient, HttpClient> httpClientInterceptor;
    private volatile boolean useMultipleWriteLocations;

    private StoreClientFactory storeClientFactory;

    private GatewayServiceConfigurationReader gatewayConfigurationReader;
    private final DiagnosticsClientConfig diagnosticsClientConfig;
    private final AtomicBoolean throughputControlEnabled;
    private ThroughputControlStore throughputControlStore;

    // Convenience constructor: no TokenCredential, custom token resolver supplied.
    public RxDocumentClientImpl(URI serviceEndpoint,
                                String masterKeyOrResourceToken,
                                List<Permission> permissionFeed,
                                ConnectionPolicy connectionPolicy,
                                ConsistencyLevel consistencyLevel,
                                Configs configs,
                                CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                                AzureKeyCredential credential,
                                boolean sessionCapturingOverride,
                                boolean connectionSharingAcrossClientsEnabled,
                                boolean contentResponseOnWriteEnabled,
                                CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                                ApiType apiType) {
        this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
            credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled,
            contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
        this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
    }

    // Convenience constructor: TokenCredential plus custom token resolver.
    public RxDocumentClientImpl(URI serviceEndpoint,
                                String masterKeyOrResourceToken,
                                List<Permission> permissionFeed,
                                ConnectionPolicy connectionPolicy,
                                ConsistencyLevel consistencyLevel,
                                Configs configs,
                                CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                                AzureKeyCredential credential,
                                TokenCredential tokenCredential,
                                boolean sessionCapturingOverride,
                                boolean connectionSharingAcrossClientsEnabled,
                                boolean contentResponseOnWriteEnabled,
                                CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                                ApiType apiType) {
        this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
            credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled,
            contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
        this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
    }

    // Processes the permission feed (resource-token auth): builds resourceTokensMap
    // keyed by resource rid/full name, then remembers the first resource token.
    private RxDocumentClientImpl(URI serviceEndpoint,
                                 String masterKeyOrResourceToken,
                                 List<Permission> permissionFeed,
                                 ConnectionPolicy connectionPolicy,
                                 ConsistencyLevel consistencyLevel,
                                 Configs configs,
                                 AzureKeyCredential credential,
                                 TokenCredential tokenCredential,
                                 boolean sessionCapturingOverrideEnabled,
                                 boolean connectionSharingAcrossClientsEnabled,
                                 boolean contentResponseOnWriteEnabled,
                                 CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                                 ApiType apiType) {
        this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
            credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled,
            contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);

        if (permissionFeed != null && permissionFeed.size() > 0) {
            this.resourceTokensMap = new HashMap<>();
            for (Permission permission : permissionFeed) {
                String[] segments = StringUtils.split(permission.getResourceLink(),
                    Constants.Properties.PATH_SEPARATOR.charAt(0));

                if (segments.length <= 0) {
                    throw new IllegalArgumentException("resourceLink");
                }

                List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
                PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
                if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                    throw new IllegalArgumentException(permission.getResourceLink());
                }

                partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
                if (partitionKeyAndResourceTokenPairs == null) {
                    partitionKeyAndResourceTokenPairs = new ArrayList<>();
                    this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
                }

                PartitionKey partitionKey = permission.getResourcePartitionKey();
                partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                    partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                    permission.getToken()));
                logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                    pathInfo.resourceIdOrFullName,
                    partitionKey != null ? partitionKey.toString() : null,
                    permission.getToken());

            }

            if(this.resourceTokensMap.isEmpty()) {
                throw new IllegalArgumentException("permissionFeed");
            }

            String firstToken = permissionFeed.get(0).getToken();
            if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
                this.firstResourceTokenFromPermissionFeed = firstToken;
            }
        }
    }

    // Core constructor: wires diagnostics, auth, policies, caches and transport.
    RxDocumentClientImpl(URI serviceEndpoint,
                         String masterKeyOrResourceToken,
                         ConnectionPolicy connectionPolicy,
                         ConsistencyLevel consistencyLevel,
                         Configs configs,
                         AzureKeyCredential credential,
                         TokenCredential tokenCredential,
                         boolean sessionCapturingOverrideEnabled,
                         boolean connectionSharingAcrossClientsEnabled,
                         boolean contentResponseOnWriteEnabled,
                         CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                         ApiType apiType) {

        activeClientsCnt.incrementAndGet();
        this.clientId = clientIdGenerator.incrementAndGet();
        this.diagnosticsClientConfig = new DiagnosticsClientConfig();
        this.diagnosticsClientConfig.withClientId(this.clientId);
        this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
        this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
        this.diagnosticsClientConfig.withConsistency(consistencyLevel);
        this.throughputControlEnabled = new AtomicBoolean(false);

        logger.info(
            "Initializing DocumentClient [{}] with"
                + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
            this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());

        try {
            this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
            this.configs = configs;
            this.masterKeyOrResourceToken = masterKeyOrResourceToken;
            this.serviceEndpoint = serviceEndpoint;
            this.credential = credential;
            this.tokenCredential = tokenCredential;
            this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
            this.authorizationTokenType = AuthorizationTokenType.Invalid;

            // Auth selection: explicit key credential takes precedence (continued below).
            if (this.credential != null) {
                hasAuthKeyResourceToken = false;
// (continuation of the core constructor's auth-selection branches)
                this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
                this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
            } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
                // A resource token: no token provider, the token itself authorizes requests.
                this.authorizationTokenProvider = null;
                hasAuthKeyResourceToken = true;
                this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
            } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
                // A raw master key: wrap it in an AzureKeyCredential.
                this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
                hasAuthKeyResourceToken = false;
                this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
                this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
            } else {
                hasAuthKeyResourceToken = false;
                this.authorizationTokenProvider = null;
                if (tokenCredential != null) {
                    // AAD auth: cache tokens for the account scope.
                    // NOTE(review): the scope string literal below is truncated in this copy
                    // (likely scheme + "://" + host + "/.default") — restore from upstream.
                    this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": };
                    this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                        .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                    this.authorizationTokenType = AuthorizationTokenType.AadToken;
                }
            }

            if (connectionPolicy != null) {
                this.connectionPolicy = connectionPolicy;
            } else {
                // Fall back to default direct-mode connection policy.
                this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
            }

            this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
            this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
            this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
            this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
            this.diagnosticsClientConfig.withMachineId(tempMachineId);

            // Session capturing only matters for SESSION consistency unless explicitly overridden.
            boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);

            this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
            this.consistencyLevel = consistencyLevel;

            this.userAgentContainer = new UserAgentContainer();

            String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
            if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
                userAgentContainer.setSuffix(userAgentSuffix);
            }

            this.httpClientInterceptor = null;
            this.reactorHttpClient = httpClient();

            this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, configs);
            this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
            this.resetSessionTokenRetryPolicy = retryPolicy;
            CpuMemoryMonitor.register(this);
            this.queryPlanCache = new ConcurrentHashMap<>();
            this.apiType = apiType;
        } catch (RuntimeException e) {
            // Undo partial initialization so no resources leak.
            logger.error("unexpected failure in initializing client.", e);
            close();
            throw e;
        }
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return diagnosticsClientConfig;
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        return BridgeInternal.createCosmosDiagnostics(this);
    }

    // Reads the database account once to learn service configuration; fails fast
    // when the endpoint is unreachable or the credential is rejected.
    private void initializeGatewayConfigurationReader() {
        this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
        DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
        if (databaseAccount == null) {
            // NOTE(review): both message literals below are truncated in this copy
            // (trailing troubleshooting URL and closing quote lost) — restore from upstream.
            logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
            throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: }

        this.useMultipleWriteLocations =
            this.connectionPolicy.isMultipleWriteRegionsEnabled()
                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
    }

    // Pushes the now-available config/caches into the already-created gateway proxy.
    private void updateGatewayProxy() {
        ((RxGatewayStoreModel)this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
        ((RxGatewayStoreModel)this.gatewayProxy).setCollectionCache(this.collectionCache);
        ((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
        ((RxGatewayStoreModel)this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
    }

    /**
     * Second-phase initialization: creates the gateway proxy, warms caches (optionally
     * from a serialized snapshot), starts telemetry and, in direct mode, the TCP transport.
     */
    public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
        try {
            // TODO: add support for openAsync
            this.httpClientInterceptor = httpClientInterceptor;
            if (httpClientInterceptor != null) {
                this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
            }

            this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
                this.consistencyLevel,
                this.queryCompatibilityMode,
                this.userAgentContainer,
                this.globalEndpointManager,
                this.reactorHttpClient,
                this.apiType);
            this.globalEndpointManager.init();
            this.initializeGatewayConfigurationReader();

            if (metadataCachesSnapshot != null) {
                this.collectionCache = new RxClientCollectionCache(this,
                    this.sessionContainer,
                    this.gatewayProxy,
                    this,
                    this.retryPolicy,
                    metadataCachesSnapshot.getCollectionInfoByNameCache(),
                    metadataCachesSnapshot.getCollectionInfoByIdCache()
                );
            } else {
                this.collectionCache = new RxClientCollectionCache(this,
                    this.sessionContainer,
                    this.gatewayProxy,
                    this,
                    this.retryPolicy);
            }
            this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);

            this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
                collectionCache);

            updateGatewayProxy();
            clientTelemetry = new ClientTelemetry(this, null, UUID.randomUUID().toString(),
                ManagementFactory.getRuntimeMXBean().getName(),
                userAgentContainer.getUserAgent(),
                connectionPolicy.getConnectionMode(),
                globalEndpointManager.getLatestDatabaseAccount().getId(),
                null,
                null,
                this.reactorHttpClient,
                connectionPolicy.isClientTelemetryEnabled(),
                this,
                this.connectionPolicy.getPreferredRegions());
            clientTelemetry.init();
            if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
                this.storeModel = this.gatewayProxy;
            } else {
                this.initializeDirectConnectivity();
            }
            this.retryPolicy.setRxCollectionCache(this.collectionCache);
        } catch (Exception e) {
            logger.error("unexpected failure in initializing client.", e);
            close();
            throw e;
        }
    }

    /** Serializes the collection metadata cache into the supplied snapshot. */
    public void serialize(CosmosClientMetadataCachesSnapshot state) {
        RxCollectionCache.serialize(state, this.collectionCache);
    }

    // Builds the address resolver and TCP store client used for direct-mode requests.
    private void initializeDirectConnectivity() {

        this.addressResolver = new GlobalAddressResolver(this,
            this.reactorHttpClient,
            this.globalEndpointManager,
            this.configs.getProtocol(),
            this,
            this.collectionCache,
            this.partitionKeyRangeCache,
            userAgentContainer,
            null,
            this.connectionPolicy,
            this.apiType);

        this.storeClientFactory = new StoreClientFactory(
            this.addressResolver,
            this.diagnosticsClientConfig,
            this.configs,
            this.connectionPolicy,
            this.userAgentContainer,
            this.connectionSharingAcrossClientsEnabled,
            this.clientTelemetry,
            this.globalEndpointManager
        );

        this.createStoreModel(true);
    }

    // Adapter exposing just enough of this client for the GlobalEndpointManager.
    DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
        return new DatabaseAccountManagerInternal() {

            @Override
            public URI getServiceEndpoint() {
                return RxDocumentClientImpl.this.getServiceEndpoint();
            }

            @Override
            public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
                logger.info("Getting database account endpoint from {}", endpoint);
                return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
            }

            @Override
            public ConnectionPolicy getConnectionPolicy() {
                return RxDocumentClientImpl.this.getConnectionPolicy();
            }
        };
    }

    // Package-private factory (overridable in tests) for the gateway store model.
    RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                             ConsistencyLevel consistencyLevel,
                                             QueryCompatibilityMode queryCompatibilityMode,
                                             UserAgentContainer userAgentContainer,
                                             GlobalEndpointManager globalEndpointManager,
                                             HttpClient httpClient,
                                             ApiType apiType) {
        return new RxGatewayStoreModel(
            this,
            sessionContainer,
            consistencyLevel,
            queryCompatibilityMode,
            userAgentContainer,
            globalEndpointManager,
            httpClient,
            apiType);
    }

    // Creates (or shares, when cross-client sharing is on) the gateway HTTP client.
    private HttpClient httpClient() {
        HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
            .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
            .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
            .withProxy(this.connectionPolicy.getProxy())
            .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

        if (connectionSharingAcrossClientsEnabled) {
            return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
        } else {
            diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
            return HttpClient.createFixed(httpClientConfig);
        }
    }

    private void createStoreModel(boolean subscribeRntbdStatus) {
        // EnableReadRequestsFallback, if not explicitly set on the connection policy,
        // is false if the account's consistency is bounded staleness,
        // and true otherwise.
        StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
            this.addressResolver,
            this.sessionContainer,
            this.gatewayConfigurationReader,
            this,
            this.useMultipleWriteLocations
        );

        this.storeModel = new ServerStoreModel(storeClient);
    }

    @Override
    public URI getServiceEndpoint() {
        return this.serviceEndpoint;
    }

    // First preferred write/read endpoint, or null when none resolved yet.
    @Override
    public URI getWriteEndpoint() {
        return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
    }

    @Override
    public URI getReadEndpoint() {
        return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
    }

    @Override
    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }

    @Override
    public boolean isContentResponseOnWriteEnabled() {
        return contentResponseOnWriteEnabled;
    }

    @Override
    public ConsistencyLevel getConsistencyLevel() {
        return consistencyLevel;
    }

    @Override
    public ClientTelemetry getClientTelemetry()
// (continuation of getClientTelemetry) returns the telemetry collector created in init().
    {
        return this.clientTelemetry;
    }

    /** Creates a database, guarded by a session-token-reset retry policy. */
    @Override
    public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Serializes the database body (capturing serialization diagnostics) and issues Create.
    private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            if (database == null) {
                throw new IllegalArgumentException("Database");
            }

            logger.debug("Creating a Database. id: [{}]", database.getId());
            validateResource(database);

            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
            Instant serializationStartTimeUTC = Instant.now();
            ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
            Instant serializationEndTimeUTC = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }

            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, Database.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Deletes a database by link, guarded by a session-token-reset retry policy. */
    @Override
    public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(databaseLink)) {
                throw new IllegalArgumentException("databaseLink");
            }

            logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
            String path = Utils.joinPath(databaseLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Database, path, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads a database by link, guarded by a session-token-reset retry policy. */
    @Override
    public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(databaseLink)) {
                throw new IllegalArgumentException("databaseLink");
            }

            logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
            String path = Utils.joinPath(databaseLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Database, path, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the feed of all databases in the account. */
    @Override
    public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
        return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
    }

    // Maps a parent resource link + child resource type to the feed/query path.
    private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
        switch (resourceTypeEnum) {
            case Database:
                return Paths.DATABASES_ROOT;

            case DocumentCollection:
                return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);

            case Document:
                return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);

            case Offer:
                return Paths.OFFERS_ROOT;

            case User:
                return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);

            case ClientEncryptionKey:
                return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);

            case Permission:
                return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);

            case Attachment:
                return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);

            case StoredProcedure:
                return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);

            case Trigger:
                return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);

            case UserDefinedFunction:
                return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);

            case Conflict:
                return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);

            default:
                throw new IllegalArgumentException("resource type not supported");
        }
    }

    // Extracts the (operation context, listener) pair from query options, if any.
    private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
        if (options == null) {
            return null;
        }
        return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
    }

    // Extracts the (operation context, listener) pair from request options, if any.
    private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
        if (options == null) {
            return null;
        }
        return options.getOperationContextAndListenerTuple();
    }

    // (signature continues beyond this chunk)
    private <T>
Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    // Resolve the feed link for the child resource type under the parent.
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);

    // Honor a caller-supplied correlation id; otherwise generate a fresh one for this query.
    UUID correlationActivityIdOfRequestOptions = ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .getCorrelationActivityId(options);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : Utils.randomUUID();

    IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

    // Retries the whole pipeline when the cached collection mapping turns out to be stale.
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy =
        new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));

    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(
            resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, correlationActivityId),
        invalidPartitionExceptionRetryPolicy);
}

// Builds the query execution pipeline and decorates responses with query-plan information.
private <T> Flux<FeedResponse<T>> createQueryInternal(
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId) {

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache);

    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        // Only pipelined contexts carry query-plan info.
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }

        QueryInfo finalQueryInfo = queryInfo;
        return iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // Attach plan diagnostics only to the first page of the feed.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
    });
}

@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    return queryDatabases(new SqlQuerySpec(query), options);
}

@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}

@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<DocumentCollection>>
createCollectionInternal(String databaseLink,
                         DocumentCollection collection, RequestOptions options,
                         DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection,
            OperationType.Create);

        // Time the payload serialization for request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token of the newly created collection for session consistency.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}

// Replaces an existing collection; the collection's self-link identifies the target.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options,
                                                                             DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection,
            OperationType.Replace);

        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Replace may return no body; only record the session token when a resource is present.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Deletes the collection addressed by collectionLink.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options,
                                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Populates headers, updates the retry context's end time on retries, then dispatches a DELETE.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

// Bulk-delete of all items in a partition key is issued as a POST against the store proxy.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request,
                                                                     DocumentClientRetryPolicy documentClientRetryPolicy,
                                                                     OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated,
                operationContextAndListenerTuple);
        });
}

// Populates headers, updates retry timing, then dispatches a GET.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request,
                                             DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

// Feed read: GET without a retry-context update.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}

// Query: POST that additionally captures the response's session token.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            ));
}

@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Point-read of a collection by link.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options,
                                                                          DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink,
                                                              CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, new SqlQuerySpec(query), options,
        DocumentCollection.class, ResourceType.DocumentCollection);
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec,
                                                               CosmosQueryRequestOptions options) {
    return createQuery(databaseLink, querySpec, options,
        DocumentCollection.class, ResourceType.DocumentCollection);
}

// Serializes stored-procedure parameters to a JSON array literal ("[p1,p2,...]").
private static String serializeProcedureParams(List<Object> objectArray) {
    String[] stringArray = new String[objectArray.size()];

    for (int i = 0; i < objectArray.size(); ++i) {
        Object object = objectArray.get(i);
        if (object instanceof JsonSerializable) {
            stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object);
        } else {
            try {
stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, 
options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { 
                autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }

            // A fixed (manual) offer cannot also carry autoscale settings.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                     autoscaleAutoUpgradeProperties != null &&
                         autoscaleAutoUpgradeProperties
                             .getAutoscaleThroughputProperties()
                             .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }

            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }

    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }

    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }

    if (options.getDedicatedGatewayRequestOptions() != null &&
        options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
        headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
            String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
    }

    return headers;
}

public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}

// Resolves the collection, then stamps the partition key (header + internal value) onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}

// Overload taking an already-resolved collection observable.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {

    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
        return request;
    });
}

// Determines the effective partition key value (explicit option, NONE, empty definition,
// or extracted from the document body) and writes it onto the request.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collections without a partition key definition get the empty key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            // Defensive: the enclosing condition guarantees one of the two is non-null.
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }

        // Extracting the key from the document body is timed for diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTime,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
            );
        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }

    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY,
        Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}

// Extracts the partition key value(s) from a document according to the key definition
// (single path for HASH, all paths for MULTI_HASH). Returns null when no definition is given.
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {

    if (partitionKeyDefinition != null) {
        switch (partitionKeyDefinition.getKind()) {
            case HASH:
                String path = partitionKeyDefinition.getPaths().iterator().next();
                List<String> parts = PathParser.getPathParts(path);
                if (parts.size() >= 1) {
                    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
                    // Missing value or an object node maps to the NONE partition key.
                    if (value == null || value.getClass() == ObjectNode.class) {
                        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
                    }

                    if (value instanceof PartitionKeyInternal) {
                        return (PartitionKeyInternal) value;
                    } else {
                        return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
                    }
                }
                break;
            case MULTI_HASH:
                Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
                for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
                    String
                    partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
                    List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
                    partitionKeyValues[pathIter] =
                        ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
                }
                return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);

            default:
                throw new IllegalArgumentException("Unrecognized Partition kind: "
                    + partitionKeyDefinition.getKind());
        }
    }

    return null;
}

// Builds a document write request: serializes the payload (timed for diagnostics),
// resolves the target collection, and stamps the partition key onto the request.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {

    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}

// Builds a transactional-batch request from a pre-serialized server batch body and
// attaches the batch headers once the collection is resolved.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {

    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}

// Writes batch-specific routing and behavior headers: either a single partition key value
// or a partition key range identity, plus atomicity/continue-on-error flags.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {

    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {

        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;

        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY,
            Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(
            ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC,
        String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR,
// --- Tail of addBatchHeaders (its opening lines are above this chunk): final header value,
// item count, and return of the mutated request. ---
String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; }
/**
 * Populates the common request headers (date, authorization, content-type/accept defaults,
 * API type) and, when feed-range filtering applies, the feed-range headers as well.
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
// Every request is stamped with the current RFC1123 date before signing.
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try {
// The token is URL-encoded before being placed in the AUTHORIZATION header.
authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); }
// Default content-type / accept headers: JSON for POST/PUT, JSON-patch for PATCH.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if
(!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range filtered reads need extra headers that require the partition-key-range cache.
if (this.requiresFeedRangeFiltering(request)) { return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); }
// Feed-range filtering only applies to document/conflict feed reads and queries carrying a feed range.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document && request.getResourceType() != ResourceType.Conflict) { return false; } switch (request.getOperationType()) { case ReadFeed: case Query: case SqlQuery: return request.getFeedRange() != null; default: return false; } }
// Adds the AAD bearer token to the request when AAD auth is configured; otherwise a pass-through.
@Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } }
// HttpHeaders overload of the above, used where only headers (not a full request) are available.
@Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return httpHeaders; }); } return Mono.just(httpHeaders); }
@Override public
// --- Continuation of the "@Override public" on the previous source line: returns the
// configured authorization token type. ---
AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; }
// Resolves the authorization token for a request, preferring (in order): a user-supplied
// token resolver, a key credential, a single master key / resource token, and finally the
// per-resource token map.
@Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is used verbatim as the authorization value.
return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Database-account reads fall back to the first token observed in the permission feed.
return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } }
// Maps the wire-level ResourceType to the public CosmosResourceType; unknown values map to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; }
// Records the session token from a response into the session container (session-consistency bookkeeping).
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); }
// POST dispatch helper: populates headers, updates retry timing when this is a retry, and
// sends the request through the store proxy.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy =
this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); }
// Upsert = POST with the IS_UPSERT header; also captures the session token from the response.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); }
// PUT dispatch helper mirroring create(...) for replace operations.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); }
// PATCH dispatch helper (method body continues on the following source lines).
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
// --- Tail of patch(...): update retry timing and dispatch through the store proxy. ---
documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); }
// Public create-document entry point. When no partition key is supplied, the retry policy is
// wrapped in PartitionKeyMismatchRetryPolicy so a stale collection cache is refreshed on mismatch.
@Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); }
// Builds the create request and dispatches it; failures during request construction are
// surfaced as Mono.error rather than thrown synchronously.
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Upsert entry point; mirrors createDocument but dispatches with OperationType.Upsert.
@Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); }
// Builds and dispatches the upsert request; mirrors createDocumentInternal.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Replace-by-link entry point; derives the collection link from the document link for
// partition-key-mismatch retries.
@Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); }
// Validates arguments, converts the POJO into a Document, then delegates to the typed overload.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } }
@Override public
// --- Continuation of the "@Override public" on the previous source line: replace using the
// document's self-link. ---
Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); }
// Delegates to the (documentLink, Document) overload using the document's self-link.
// NOTE(review): the debug message below says "replacing a database" although this replaces a
// document - presumably a copy/paste slip in the log text; confirm before changing it.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } }
// Core replace: serializes the document (timed for diagnostics), builds the PUT request,
// resolves collection + partition key, and maps the response to ResourceResponse<Document>.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda dispatches the outer 'request' rather than 'req'; this appears to
// rely on addPartitionKeyInformation mutating and returning the same request instance - confirm.
return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); }
// Public PATCH entry point: applies the patch operations with session-token-reset retry semantics.
@Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink,
cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); }
// Builds the PATCH request: serializes the patch operations (timed for diagnostics) and
// resolves collection + partition key before dispatch (continues on the following source lines).
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs =
// --- Tail of patchDocumentInternal: resolve partition-key info, then dispatch the PATCH.
// NOTE(review): as elsewhere, the lambda uses the outer 'request' rather than 'req' - this
// appears to rely on addPartitionKeyInformation returning the same mutated instance; confirm. ---
addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); }
// Delete by link (no pre-read item snapshot supplied).
@Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); }
// Delete overload that passes the known item body so partition-key info can be derived from it.
@Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); }
// Builds and dispatches the DELETE request for a single document.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } }
// Deletes all documents sharing a partition key (DELETE on the PartitionKey resource type).
// NOTE(review): the partitionKey parameter is not referenced in this method body; the value is
// presumably carried inside 'options' - confirm.
@Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); }
// Builds and dispatches the bulk-delete-by-partition-key request.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } }
// Point-read of a single document by link.
@Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and dispatches the READ request for a single document.
// NOTE(review): the inner lambda reads via the outer 'request' rather than 'req' - same
// pattern as replace/patch above; confirm the instances are identical.
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } }
// Reads all documents by issuing a SELECT * query over the collection.
@Override public <T> Flux<FeedResponse<T>> readDocuments( String collectionLink, CosmosQueryRequestOptions options, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options, classOfT); }
// readMany: groups the requested (id, partitionKey) pairs by their owning partition-key range,
// builds one query per range, runs them, and merges the pages into a single FeedResponse.
// (Method body continues on the following source lines.)
@Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection =
// --- Continuation of readMany(...): resolve the routing map, bucket item identities per
// partition-key range, then fan out one query per range and aggregate the pages. ---
documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); }
// Bucket each requested item identity by the range that owns its effective partition key.
itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
// Execute the per-range queries and aggregate pages: results, request charge, query metrics.
return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page
: feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); }
// Builds one SqlQuerySpec per partition-key range. When the PK selector is exactly ["id"] the
// simpler IN-clause form is used; otherwise an (id AND pk) disjunction is generated.
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; }
// IN-clause variant used when the partition key path is the id itself: only pairs whose id
// equals the partition-key value are included.
// NOTE(review): if the final pair is skipped by the 'continue', the generated query can be left
// with a trailing comma before ')' (and an empty IN list is possible) - verify this case is
// prevented upstream before relying on it.
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue =
ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); }
// General variant: one "(c.id = @p AND c<pkSelector> = @q)" disjunct per requested item.
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); }
// Renders the PK definition paths as a bracketed selector, e.g. /pk -> ["pk"].
// NOTE(review): the inner replace maps '"' to '\' rather than to an escaped quote - confirm
// this is intended. (Method body continues on the following source lines.)
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]")
// --- Tail of createPkSelector: join the per-path selectors into one string. ---
.collect(Collectors.joining()); }
// Creates the per-range read-many execution contexts and flattens their pages into one Flux.
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); }
// String-query convenience overload; wraps the text in a SqlQuerySpec.
@Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, String query, CosmosQueryRequestOptions options, Class<T> classOfT) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options, classOfT); }
// Adapts this client to the IDocumentQueryClient interface used by the query pipeline; when an
// operation listener is present, query execution is wrapped with request/response/exception callbacks.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public
Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() {
// Always reports the default compatibility mode.
return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// NOTE(review): readFeedAsync is unimplemented here (returns null) - confirm it is never
// invoked through this adapter.
return null; } }; }
// SqlQuerySpec query entry point; logs the query text, then builds the document query pipeline.
@Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> classOfT) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, options, classOfT, ResourceType.Document); }
// Change-feed query over a collection, driven by CosmosChangeFeedRequestOptions.
@Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions, Class<T> classOfT) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, classOfT, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); }
// Reads all documents for a single partition key (method body continues beyond this chunk).
@Override public <T> Flux<FeedResponse<T>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    // Dummy Query request used only to resolve the collection from the cache.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        // SELECT scoped to the single logical partition via the pk selector.
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = Utils.randomUUID();
        IDocumentQueryClient queryClient =
            documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

        final CosmosQueryRequestOptions effectiveOptions =
            ModelBridgeInternal.createQueryRequestOptions(options);

        // Retries on InvalidPartitionException by refreshing the collection cache.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy =
            new InvalidPartitionExceptionRetryPolicy(
                this.collectionCache,
                null,
                resourceLink,
                ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        return ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono =
                    this.partitionKeyRangeCache
                        .tryLookupAsync(
                            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                            collection.getResourceId(),
                            null,
                            null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    // Map the partition key to its effective partition key string,
                    // then pin the query to the owning partition-key range.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId);
                });
            },
            invalidPartitionExceptionRetryPolicy);
    });
}

/** Exposes the cached query plans keyed by query text. */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}

/** Reads the partition-key-range feed of the given collection. */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

/**
 * Validates inputs and assembles the service request for a stored-procedure
 * operation under the collection's stored-procedures path.
 *
 * @throws IllegalArgumentException if the link is empty or the resource is null
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }

    validateResource(storedProcedure);

    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

    return request;
}

/**
 * Validates inputs and assembles the service request for a user-defined-function
 * operation. (Body continues in the next chunk.)
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }

    validateResource(udf);

    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders =
        this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType,
        ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);

    return request;
}

// Pattern used throughout: the public method wires a fresh session-token-reset
// retry policy around the *Internal worker; the worker builds the request,
// notifies the retry policy before sending, and maps the wire response to a
// typed ResourceResponse. Synchronous failures surface as Mono.error.

/** Creates a stored procedure under the given collection. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
        requestRetryPolicy);
}

private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure,
                                                                              RequestOptions options,
                                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Upserts (creates or replaces) a stored procedure under the given collection. */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
        requestRetryPolicy);
}

private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure,
                                                                              RequestOptions options,
                                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Replaces an existing stored procedure (addressed via its self link). */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy),
        requestRetryPolicy);
}

private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options,
                                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders =
            getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the stored procedure addressed by the given link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy),
        requestRetryPolicy);
}

private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options,
                                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete,
            ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the stored procedure addressed by the given link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options,
                                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read,
            ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the stored-procedure feed of the given collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}

/** Queries stored procedures with a raw query string. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 CosmosQueryRequestOptions options) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries stored procedures with a parameterized query spec. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec,
                                                                 CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}

/** Executes a stored procedure with default request options. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            List<Object> procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}

/** Executes a stored procedure with explicit request options. (Continues in next chunk.) */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options,
                                                            List<Object> procedureParams) {
    DocumentClientRetryPolicy
documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy),
        documentClientRetryPolicy);
}

/** Executes a transactional batch against the given collection. */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy,
            disableAutomaticIdGeneration),
        documentClientRetryPolicy);
}

/**
 * Worker for stored-procedure execution: serializes the parameters into the
 * request body, resolves partition-key routing, sends the ExecuteJavaScript
 * request, captures the session token and maps the wire response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options,
                                                                     List<Object> procedureParams,
                                                                     DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders =
            getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

        // Empty/null parameter lists are sent as an empty body.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty()
                ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);

        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda ignores 'req' and reuses the outer 'request' —
        // presumably addPartitionKeyInformation mutates the request in place;
        // confirm before relying on it.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));

    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Worker for batch execution: builds the batch document request, sends it as a
 * create, and parses the service response into a {@link CosmosBatchResponse}.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}",
            serverBatchRequest.getOperations().size());

        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink,
            serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy,
                getOperationContextAndListenerTuple(options)));

        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse,
                serverBatchRequest, true));

    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}

/** Creates a trigger under the given collection. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance),
        retryPolicyInstance);
}

private
Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                      RequestOptions options,
                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]",
            collectionLink, trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Upserts (creates or replaces) a trigger under the given collection. */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]",
            collectionLink, trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Validates inputs and assembles the service request for a trigger operation
 * under the collection's triggers path.
 *
 * @throws IllegalArgumentException if the link is empty or the trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger,
                                                   RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }

    RxDocumentClientImpl.validateResource(trigger);

    String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType,
        ResourceType.Trigger, path, trigger, requestHeaders, options);

    return request;
}

/** Replaces an existing trigger (addressed via its self link). */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }

        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Trigger, path, trigger, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the trigger addressed by the given link. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete,
            ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the trigger addressed by the given link. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read,
            ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the trigger feed of the given collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return readFeed(options, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}

/** Queries triggers with a raw query string. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 CosmosQueryRequestOptions options) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries triggers with a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}

/** Creates a user-defined function under the given collection. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance),
        retryPolicyInstance);
}

// Signature continues in the next chunk.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]",
            collectionLink, udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Upserts (creates or replaces) a user-defined function under the given collection. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf,
                                                                                      RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]",
            collectionLink, udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Replaces an existing user-defined function (addressed via its self link). */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options,
                                                                                       DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }

        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);

        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the user-defined function addressed by the given link. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
                                                                                      RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }

        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the user-defined function addressed by the given link. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
                                                                                    RequestOptions options,
                                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }

        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the user-defined-function feed of the given collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
        Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}

/** Queries user-defined functions with a raw query string. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query,
                                                                         CosmosQueryRequestOptions options) {
    return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries user-defined functions with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec,
                                                                         CosmosQueryRequestOptions options) {
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class,
        ResourceType.UserDefinedFunction);
}

/** Reads the conflict addressed by the given link. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
            // Conflicts live under a partition; the request must carry partition-key info before it is sent.
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            return reqObs.flatMap(req -> {
                // NOTE(review): the lambda parameter 'req' is unused; the outer 'request' instance is
                // referenced instead. They appear to be the same object here, but confirm upstream.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.read(request, retryPolicyInstance)
                    .map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads the conflict feed of a collection.
    @Override
    public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return readFeed(options, ResourceType.Conflict, Conflict.class,
            Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
    }

    // String-query overload; delegates to the SqlQuerySpec overload.
    @Override
    public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) {
        return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
        return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
    }

    // Deletes a Conflict resource; retry-wrapped like the sibling operations.
    @Override
    public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Builds and issues the Delete request for a Conflict, routing partition-key info first.
    private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            return reqObs.flatMap(req -> {
                // NOTE(review): lambda parameter 'req' is unused; the outer 'request' is used instead.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                    .map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Creates a User under the given database.
    @Override
    public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
    }

    private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Upserts (create-or-replace) a User under the given database.
    @Override
    public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Validates arguments and builds the service request shared by create/upsert of a User.
    private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            operationType, ResourceType.User, path, user, requestHeaders, options);
        return request;
    }

    // Replaces an existing User (addressed by its self-link).
    @Override
    public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (user == null) {
                throw new IllegalArgumentException("user");
            }
            logger.debug("Replacing a User. user id [{}]", user.getId());
            RxDocumentClientImpl.validateResource(user);
            String path = Utils.joinPath(user.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Deletes a User.
    // NOTE(review): unlike the sibling CRUD methods this one is missing @Override — confirm against the interface.
    public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Deleting a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads a single User.
    @Override
    public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Reading a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads the user feed of a database.
    @Override
    public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        return readFeed(options, ResourceType.User, User.class,
            Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
    }

    // String-query overload; delegates to the SqlQuerySpec overload.
    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
        return queryUsers(databaseLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
        return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
    }

    // Reads a single client encryption key.
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if
(StringUtils.isEmpty(clientEncryptionKeyLink)) {
                throw new IllegalArgumentException("clientEncryptionKeyLink");
            }
            logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
            String path = Utils.joinPath(clientEncryptionKeyLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Creates a client encryption key under the given database.
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
            RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Validates arguments and builds the service request for client-encryption-key writes.
    private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
        return request;
    }

    // Replaces a client encryption key addressed by a name-based link.
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (clientEncryptionKey == null) {
                throw new IllegalArgumentException("clientEncryptionKey");
            }
            logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
            RxDocumentClientImpl.validateResource(clientEncryptionKey);
            String path = Utils.joinPath(nameBasedLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads the client-encryption-key feed of a database.
    @Override
    public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
            Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
    }

    // String-query overload; delegates to the SqlQuerySpec overload.
    @Override
    public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query, CosmosQueryRequestOptions options) {
        return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
        return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
    }

    // Creates a Permission under the given user.
    @Override
    public Mono<ResourceResponse<Permission>>
createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } 
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
            RxDocumentClientImpl.validateResource(offer);
            String path = Utils.joinPath(offer.getSelfLink(), null);
            // Offers take no RequestOptions/headers — both trailing arguments are intentionally null.
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.Offer, path, offer, null, null);
            return this.replace(request, documentClientRetryPolicy)
                .map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads a single Offer.
    @Override
    public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(offerLink)) {
                throw new IllegalArgumentException("offerLink");
            }
            logger.debug("Reading an Offer. offerLink [{}]", offerLink);
            String path = Utils.joinPath(offerLink, null);
            // Cast disambiguates the overload taking a header map from the one taking options.
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads the account-level offer feed.
    @Override
    public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
        return readFeed(options, ResourceType.Offer, Offer.class,
            Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
    }

    // Shared pagination-driven feed reader used by all readXxx feed methods.
    // A null options object is replaced with defaults; a missing max item count maps to -1
    // (dynamic page size decided downstream).
    private <T> Flux<FeedResponse<T>> readFeed(
        CosmosQueryRequestOptions options,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink) {
        if (options == null) {
            options = new CosmosQueryRequestOptions();
        }
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int maxPageSize = maxItemCount != null ? maxItemCount : -1;
        final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
        DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        // Builds a ReadFeed request per page; the continuation token and page size come from the paginator.
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };
        // Executes one page read under the shared retry policy and maps it to a typed FeedResponse.
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
            .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(
                    response,
                    ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .getItemFactoryMethod(finalCosmosQueryRequestOptions, klass),
                    klass)),
                retryPolicy);
        return Paginator.getPaginatedQueryResultAsObservable(
            options, createRequestFunc, executeFunc, maxPageSize);
    }

    // String-query overload; delegates to the SqlQuerySpec overload.
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
        return queryOffers(new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
        return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
    }

    // Fetches the DatabaseAccount from the service.
    @Override
    public Mono<DatabaseAccount> getDatabaseAccount() {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
    }

    // Returns the most recently cached DatabaseAccount without issuing a request.
    @Override
    public DatabaseAccount getLatestDatabaseAccount()
{
        return this.globalEndpointManager.getLatestDatabaseAccount();
    }

    // Issues the DatabaseAccount read (empty resource path) and maps the wire response.
    private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Getting Database Account");
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read,
                ResourceType.DatabaseAccount, "",
                (HashMap<String, String>) null,
                null);
            return this.read(request, documentClientRetryPolicy)
                .map(ModelBridgeInternal::toDatabaseAccount);
        } catch (Exception e) {
            logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Exposes the session container as an opaque Object (interface constraint).
    public Object getSession() {
        return this.sessionContainer;
    }

    // Replaces the session container; caller must supply a SessionContainer or the cast throws.
    public void setSession(Object sessionContainer) {
        this.sessionContainer = (SessionContainer) sessionContainer;
    }

    @Override
    public RxClientCollectionCache getCollectionCache() {
        return this.collectionCache;
    }

    @Override
    public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
        return partitionKeyRangeCache;
    }

    // Reads the DatabaseAccount from a specific endpoint (used for region discovery) and, as a
    // side effect, refreshes the multi-write flag from the returned account.
    public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
        return Flux.defer(() -> {
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
            return this.populateHeadersAsync(request, RequestVerb.GET)
                .flatMap(requestPopulated -> {
                    requestPopulated.setEndpointOverride(endpoint);
                    return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                        // Failure is logged but not swallowed: the error still propagates to the subscriber.
                        String message = String.format("Failed to retrieve database account information. %s",
                            e.getCause() != null
                                ? e.getCause().toString()
                                : e.toString());
                        logger.warn(message);
                    }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                        .doOnNext(databaseAccount ->
                            this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
                });
        });
    }

    /**
     * Certain requests must be routed through gateway even when the client connectivity mode is direct.
     *
     * @param request the service request being dispatched
     * @return RxStoreModel the gateway proxy or the direct store model, per routing rules below
     */
    private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
        if (request.UseGatewayMode) {
            return this.gatewayProxy;
        }

        ResourceType resourceType = request.getResourceType();
        OperationType operationType = request.getOperationType();

        // These resource/operation combinations are gateway-only regardless of connection mode.
        if (resourceType == ResourceType.Offer ||
            resourceType == ResourceType.ClientEncryptionKey ||
            resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
            resourceType == ResourceType.PartitionKeyRange ||
            resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            return this.gatewayProxy;
        }

        if (operationType == OperationType.Create
                || operationType == OperationType.Upsert) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection ||
                resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Delete) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Replace) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Read) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else {
            // Queries/feed reads on collection children without partition routing info go to the gateway.
            if ((operationType == OperationType.Query ||
                operationType == OperationType.SqlQuery ||
                operationType == OperationType.ReadFeed) &&
                    Utils.isCollectionChild(request.getResourceType())) {
                if (request.getPartitionKeyRangeIdentity() == null &&
                    request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                    return this.gatewayProxy;
                }
            }

            return this.storeModel;
        }
    }

    @Override
    public void close() {
        logger.info("Attempting to close client {}",
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); this.storeModel.enableThroughputControl(throughputControlStore); } this.throughputControlStore.enableThroughputControlGroup(group); } @Override private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new 
SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) { logger.debug("getFeedRange collectionLink=[{}]", collectionLink); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { 
request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } }
reformat this line.
List<VcapPojo> parseVcapService(String vcapServices) { final List<VcapPojo> results = new ArrayList<>(); log("VcapParser.parse: vcapServices = " + vcapServices); if (StringUtils.hasText(vcapServices)) { try { final JsonParser parser = JsonParserFactory.getJsonParser(); final Map<String, Object> servicesMap = parser.parseMap(vcapServices); final Set<Map.Entry<String, Object>> services = servicesMap.entrySet(); Assert.notNull(services, "Services entrySet cannot be null."); for (final Map.Entry<String, Object> serviceEntry : services) { final String name = serviceEntry.getKey(); if (name.startsWith(AZURE) || USER_PROVIDED.equals(name)) { Assert.isInstanceOf(List.class, serviceEntry.getValue()); final List<VcapServiceConfig> azureServices = getVcapServiceConfigList(serviceEntry.getValue()); results.addAll( azureServices.stream() .map(service -> parseService(name, service, vcapServices)) .filter(Objects::nonNull).collect(Collectors.toList()) ); } } } catch (JsonParseException e) { LOGGER.error("Error parsing " + vcapServices, e); } } return results; }
.filter(Objects::nonNull).collect(Collectors.toList())
List<VcapPojo> parseVcapService(String vcapServices) { final List<VcapPojo> results = new ArrayList<>(); log("VcapParser.parse: vcapServices = " + vcapServices); if (StringUtils.hasText(vcapServices)) { try { final JsonParser parser = JsonParserFactory.getJsonParser(); final Map<String, Object> servicesMap = parser.parseMap(vcapServices); final Set<Map.Entry<String, Object>> services = servicesMap.entrySet(); Assert.notNull(services, "Services entrySet cannot be null."); for (final Map.Entry<String, Object> serviceEntry : services) { final String name = serviceEntry.getKey(); if (name.startsWith(AZURE) || USER_PROVIDED.equals(name)) { Assert.isInstanceOf(List.class, serviceEntry.getValue()); final List<VcapServiceConfig> azureServices = getVcapServiceConfigList(serviceEntry.getValue()); results.addAll( azureServices.stream() .map(service -> parseService(name, service, vcapServices)) .filter(Objects::nonNull).collect(Collectors.toList()) ); } } } catch (JsonParseException e) { LOGGER.error("Error parsing " + vcapServices, e); } } return results; }
class VcapProcessor implements EnvironmentPostProcessor { private static final Logger LOGGER = LoggerFactory.getLogger(VcapProcessor.class); /** * VCAP services */ private static final String VCAP_SERVICES = "VCAP_SERVICES"; /** * Log variable */ private static final String LOG_VARIABLE = "COM_MICROSOFT_AZURE_CLOUDFOUNDRY_SERVICE_LOG"; private static final String AZURE = "azure-"; private static final String USER_PROVIDED = "user-provided"; private static final String AZURE_SERVICE_BROKER_NAME = "azure-service-broker-name"; private static final String AZURE_SERVICE_PLAN = "azure-service-plan"; private static final String CREDENTIALS = "credentials"; private boolean logFlag = false; @Override public void postProcessEnvironment(ConfigurableEnvironment confEnv, SpringApplication app) { final Map<String, Object> environment = confEnv.getSystemEnvironment(); final String logValue = (String) environment.get(LOG_VARIABLE); if ("true".equals(logValue)) { logFlag = true; } log("VcapParser.postProcessEnvironment: Start"); final String vcapServices = (String) environment.get(VCAP_SERVICES); final List<VcapPojo> vcapPojos = parseVcapService(vcapServices); new VcapResult(confEnv, vcapPojos.toArray(new VcapPojo[0]), logFlag); log("VcapParser.postProcessEnvironment: End"); } @SuppressWarnings("unchecked") private VcapServiceConfig getVcapServiceConfig(@NonNull Map<String, Object> configMap) { final VcapServiceConfig serviceConfig = new VcapServiceConfig(); serviceConfig.setLabel((String) configMap.getOrDefault("label", null)); serviceConfig.setName((String) configMap.getOrDefault("name", null)); serviceConfig.setProvider((String) configMap.getOrDefault("provider", null)); serviceConfig.setSyslogDrainUrl((String) configMap.getOrDefault("syslog_drain_url", null)); serviceConfig.setPlan((String) configMap.getOrDefault("plan", null)); final List<String> tags = (List<String>) configMap.get("tags"); final List<String> volumeMounts = (List<String>) configMap.get("volume_mounts"); if 
(tags != null) { serviceConfig.setTags(tags.toArray(new String[0])); } if (volumeMounts != null) { serviceConfig.setVolumeMounts(volumeMounts.toArray(new String[0])); } serviceConfig.setCredentials((Map<String, String>) configMap.get(CREDENTIALS)); return serviceConfig; } private List<VcapServiceConfig> getVcapServiceConfigList(@NonNull Object value) { Assert.isInstanceOf(List.class, value); @SuppressWarnings("unchecked") final List<Map<String, Object>> configs = (List<Map<String, Object>>) value; return configs.stream().map(this::getVcapServiceConfig).collect(Collectors.toList()); } /** * Parses the VCap service. * * @param vcapServices the VCap service * @return the list of Vcap POJOs */ private VcapPojo parseService(String serviceBrokerName, VcapServiceConfig serviceConfig, String vCapServices) { final VcapPojo result = new VcapPojo(); final Map<String, String> credentials = serviceConfig.getCredentials(); if (USER_PROVIDED.equals(serviceBrokerName)) { if (credentials == null) { return null; } final String userServiceBrokerName = credentials.remove(AZURE_SERVICE_BROKER_NAME); if (userServiceBrokerName == null) { return null; } result.setServiceBrokerName(userServiceBrokerName); final String userServicePlan = credentials.remove(AZURE_SERVICE_PLAN); serviceConfig.setPlan(userServicePlan); serviceConfig.setCredentials(credentials); } else { result.setServiceBrokerName(serviceBrokerName); serviceConfig.setPlan(serviceConfig.getPlan()); if (credentials == null) { LOGGER.error("Found {}, but missing {} : {}", serviceBrokerName, CREDENTIALS, vCapServices); } } result.setServiceConfig(serviceConfig); return result; } private void log(String msg) { if (logFlag) { LOGGER.info(msg); } } }
class VcapProcessor implements EnvironmentPostProcessor { private static final Logger LOGGER = LoggerFactory.getLogger(VcapProcessor.class); /** * VCAP services */ private static final String VCAP_SERVICES = "VCAP_SERVICES"; /** * Log variable */ private static final String LOG_VARIABLE = "COM_MICROSOFT_AZURE_CLOUDFOUNDRY_SERVICE_LOG"; private static final String AZURE = "azure-"; private static final String USER_PROVIDED = "user-provided"; private static final String AZURE_SERVICE_BROKER_NAME = "azure-service-broker-name"; private static final String AZURE_SERVICE_PLAN = "azure-service-plan"; private static final String CREDENTIALS = "credentials"; private boolean logFlag = false; @Override public void postProcessEnvironment(ConfigurableEnvironment confEnv, SpringApplication app) { final Map<String, Object> environment = confEnv.getSystemEnvironment(); final String logValue = (String) environment.get(LOG_VARIABLE); if ("true".equals(logValue)) { logFlag = true; } log("VcapParser.postProcessEnvironment: Start"); final String vcapServices = (String) environment.get(VCAP_SERVICES); final List<VcapPojo> vcapPojos = parseVcapService(vcapServices); new VcapResult(confEnv, vcapPojos.toArray(new VcapPojo[0]), logFlag); log("VcapParser.postProcessEnvironment: End"); } @SuppressWarnings("unchecked") private VcapServiceConfig getVcapServiceConfig(@NonNull Map<String, Object> configMap) { final VcapServiceConfig serviceConfig = new VcapServiceConfig(); serviceConfig.setLabel((String) configMap.getOrDefault("label", null)); serviceConfig.setName((String) configMap.getOrDefault("name", null)); serviceConfig.setProvider((String) configMap.getOrDefault("provider", null)); serviceConfig.setSyslogDrainUrl((String) configMap.getOrDefault("syslog_drain_url", null)); serviceConfig.setPlan((String) configMap.getOrDefault("plan", null)); final List<String> tags = (List<String>) configMap.get("tags"); final List<String> volumeMounts = (List<String>) configMap.get("volume_mounts"); if 
(tags != null) { serviceConfig.setTags(tags.toArray(new String[0])); } if (volumeMounts != null) { serviceConfig.setVolumeMounts(volumeMounts.toArray(new String[0])); } serviceConfig.setCredentials((Map<String, String>) configMap.get(CREDENTIALS)); return serviceConfig; } private List<VcapServiceConfig> getVcapServiceConfigList(@NonNull Object value) { Assert.isInstanceOf(List.class, value); @SuppressWarnings("unchecked") final List<Map<String, Object>> configs = (List<Map<String, Object>>) value; return configs.stream().map(this::getVcapServiceConfig).collect(Collectors.toList()); } /** * Parses the VCap service. * * @param vcapServices the VCap service * @return the list of Vcap POJOs */ private VcapPojo parseService(String serviceBrokerName, VcapServiceConfig serviceConfig, String vCapServices) { final VcapPojo result = new VcapPojo(); final Map<String, String> credentials = serviceConfig.getCredentials(); if (USER_PROVIDED.equals(serviceBrokerName)) { if (credentials == null) { return null; } final String userServiceBrokerName = credentials.remove(AZURE_SERVICE_BROKER_NAME); if (userServiceBrokerName == null) { return null; } result.setServiceBrokerName(userServiceBrokerName); final String userServicePlan = credentials.remove(AZURE_SERVICE_PLAN); serviceConfig.setPlan(userServicePlan); serviceConfig.setCredentials(credentials); } else { result.setServiceBrokerName(serviceBrokerName); serviceConfig.setPlan(serviceConfig.getPlan()); if (credentials == null) { LOGGER.error("Found {}, but missing {} : {}", serviceBrokerName, CREDENTIALS, vCapServices); } } result.setServiceConfig(serviceConfig); return result; } private void log(String msg) { if (logFlag) { LOGGER.info(msg); } } }
Also a merge of "if statements".
protected void configureRetry(T builder) { RetryOptionsProvider.RetryOptions retry = null; if (azureProperties instanceof RetryOptionsProvider) { retry = ((RetryOptionsProvider) azureProperties).getRetry(); } if (retry == null) { return; } if (RetryOptionsProvider.RetryMode.EXPONENTIAL == retry.getMode()) { if (retry.getExponential() != null && retry.getExponential().getMaxRetries() != null) { builder.maxRetry(retry.getExponential().getMaxRetries()); } } else if (RetryOptionsProvider.RetryMode.FIXED == retry.getMode() && retry.getFixed() != null && retry.getFixed().getMaxRetries() != null) { builder.maxRetry(retry.getFixed().getMaxRetries()); } }
}
protected void configureRetry(T builder) { RetryOptionsProvider.RetryOptions retry = null; if (azureProperties instanceof RetryOptionsProvider) { retry = ((RetryOptionsProvider) azureProperties).getRetry(); } if (retry == null) { return; } if (RetryOptionsProvider.RetryMode.EXPONENTIAL == retry.getMode()) { if (retry.getExponential() != null && retry.getExponential().getMaxRetries() != null) { builder.maxRetry(retry.getExponential().getMaxRetries()); } } else if (RetryOptionsProvider.RetryMode.FIXED == retry.getMode() && retry.getFixed() != null && retry.getFixed().getMaxRetries() != null) { builder.maxRetry(retry.getFixed().getMaxRetries()); } }
class AbstractAzureCredentialBuilderFactory<T extends CredentialBuilderBase<T>> extends AbstractAzureHttpClientBuilderFactory<T> { private static final Logger LOGGER = LoggerFactory.getLogger(AbstractAzureCredentialBuilderFactory.class); private final AzureProperties azureProperties; /** * To create a {@link AbstractAzureCredentialBuilderFactory} instance with {@link AzureProperties}. * @param azureProperties The Azure properties. */ protected AbstractAzureCredentialBuilderFactory(AzureProperties azureProperties) { this.azureProperties = azureProperties; } @Override protected BiConsumer<T, HttpClient> consumeHttpClient() { return T::httpClient; } @Override protected BiConsumer<T, HttpPipeline> consumeHttpPipeline() { return T::httpPipeline; } @Override protected AzureProperties getAzureProperties() { return this.azureProperties; } @Override protected BiConsumer<T, Configuration> consumeConfiguration() { return T::configuration; } @Override @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(T builder) { return Collections.emptyList(); } @Override protected BiConsumer<T, ClientOptions> consumeClientOptions() { return (a, b) -> { }; } @Override protected BiConsumer<T, TokenCredential> consumeDefaultTokenCredential() { return (a, b) -> { }; } @Override protected BiConsumer<T, String> consumeConnectionString() { return (a, b) -> { }; } @Override protected BiConsumer<T, HttpLogOptions> consumeHttpLogOptions() { return (a, b) -> { }; } @Override protected BiConsumer<T, HttpPipelinePolicy> consumeHttpPipelinePolicy() { return (a, b) -> { }; } @Override protected BiConsumer<T, RetryPolicy> consumeRetryPolicy() { LOGGER.debug("No need to specify retry policy."); return (a, b) -> { }; } @Override protected void configureService(T builder) { } }
class AbstractAzureCredentialBuilderFactory<T extends CredentialBuilderBase<T>> extends AbstractAzureHttpClientBuilderFactory<T> { private static final Logger LOGGER = LoggerFactory.getLogger(AbstractAzureCredentialBuilderFactory.class); private final AzureProperties azureProperties; /** * To create a {@link AbstractAzureCredentialBuilderFactory} instance with {@link AzureProperties}. * @param azureProperties The Azure properties. */ protected AbstractAzureCredentialBuilderFactory(AzureProperties azureProperties) { this.azureProperties = azureProperties; } @Override protected BiConsumer<T, HttpClient> consumeHttpClient() { return T::httpClient; } @Override protected BiConsumer<T, HttpPipeline> consumeHttpPipeline() { return T::httpPipeline; } @Override protected AzureProperties getAzureProperties() { return this.azureProperties; } @Override protected BiConsumer<T, Configuration> consumeConfiguration() { return T::configuration; } @Override @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(T builder) { return Collections.emptyList(); } @Override protected BiConsumer<T, ClientOptions> consumeClientOptions() { return (a, b) -> { }; } @Override protected BiConsumer<T, TokenCredential> consumeDefaultTokenCredential() { return (a, b) -> { }; } @Override protected BiConsumer<T, String> consumeConnectionString() { return (a, b) -> { }; } @Override protected BiConsumer<T, HttpLogOptions> consumeHttpLogOptions() { return (a, b) -> { }; } @Override protected BiConsumer<T, HttpPipelinePolicy> consumeHttpPipelinePolicy() { return (a, b) -> { }; } @Override protected BiConsumer<T, RetryPolicy> consumeRetryPolicy() { LOGGER.debug("No need to specify retry policy."); return (a, b) -> { }; } @Override protected void configureService(T builder) { } }
local var "destination" hide field name will bring reading confusion. Use "dest" here for clarity and shortness. Besides, Does "getDesitnation(message)" looks more reasonable.
protected void handleMessageInternal(Message<?> message) { String dest = toDestination(message); Map<String, String> partitionHeaders = getPartitionFromExpression(message); Message<?> messageToSend = createMutableMessage(message, partitionHeaders); final Mono<Void> mono = this.sendOperation.sendAsync(dest, messageToSend); if (this.sync) { waitingSendResponse(mono, message); } else { handleSendResponseAsync(mono, message); } }
String dest = toDestination(message);
protected void handleMessageInternal(Message<?> message) { String dest = toDestination(message); Map<String, String> partitionHeaders = getPartitionFromExpression(message); Message<?> messageToSend = createMutableMessage(message, partitionHeaders); final Mono<Void> mono = this.sendOperation.sendAsync(dest, messageToSend); if (this.sync) { waitingSendResponse(mono, message); } else { handleSendResponseAsync(mono, message); } }
class DefaultMessageHandler extends AbstractMessageProducingHandler { private static final Logger LOGGER = LoggerFactory.getLogger(DefaultMessageHandler.class); private static final long DEFAULT_SEND_TIMEOUT = 10000; private final String destination; private final SendOperation sendOperation; private boolean sync = false; private ListenableFutureCallback<Void> sendCallback; private EvaluationContext evaluationContext; private Expression sendTimeoutExpression = new ValueExpression<>(DEFAULT_SEND_TIMEOUT); private ErrorMessageStrategy errorMessageStrategy = new DefaultErrorMessageStrategy(); private Expression partitionKeyExpression; private Expression partitionIdExpression; private MessageChannel sendFailureChannel; private String sendFailureChannelName; /** * Construct a {@link DefaultMessageHandler} with the specified destination and sendOperation. * * @param destination the destination * @param sendOperation operation for sending Messages to a destination */ public DefaultMessageHandler(String destination, @NonNull SendOperation sendOperation) { Assert.hasText(destination, "destination can't be null or empty"); this.destination = destination; this.sendOperation = sendOperation; } @Override protected void onInit() { super.onInit(); this.evaluationContext = ExpressionUtils.createStandardEvaluationContext(getBeanFactory()); LOGGER.info("Started DefaultMessageHandler with properties: {}", buildPropertiesMap()); } @Override private <T> void handleSendResponseAsync(Mono<T> mono, Message<?> message) { mono.doOnError(ex -> { if (LOGGER.isWarnEnabled()) { LOGGER.warn("{} sent failed in async mode due to {}", message, ex.getMessage()); } if (this.sendCallback != null) { this.sendCallback.onFailure(ex); } if (getSendFailureChannel() != null) { this.messagingTemplate.send(getSendFailureChannel(), getErrorMessageStrategy() .buildErrorMessage(new AzureSendFailureException(message, ex), null)); } }).doOnSuccess(t -> { if (LOGGER.isDebugEnabled()) { LOGGER.debug("{} sent 
successfully in async mode", message); } if (this.sendCallback != null) { this.sendCallback.onSuccess((Void) t); } }).subscribe(); } private <T> void waitingSendResponse(Mono<T> mono, Message<?> message) { Long sendTimeout = this.sendTimeoutExpression.getValue(this.evaluationContext, message, Long.class); if (sendTimeout == null || sendTimeout < 0) { try { mono.block(); } catch (Exception e) { throw new MessageDeliveryException(e.getMessage()); } } else { try { mono.block(Duration.of(sendTimeout, ChronoUnit.MILLIS)); if (LOGGER.isDebugEnabled()) { LOGGER.debug("{} sent successfully in sync mode", message); } } catch (Exception e) { if (e.getCause() instanceof TimeoutException) { throw new MessageTimeoutException(message, "Timeout waiting for send event hub response"); } throw new MessageDeliveryException(e.getMessage()); } } } /** * Set sync. * * @param sync the sync */ public void setSync(boolean sync) { this.sync = sync; LOGGER.info("DefaultMessageHandler sync becomes: {}", sync); } @Override public void setSendTimeout(long sendTimeout) { setSendTimeoutExpression(new ValueExpression<>(sendTimeout)); } /** * Set partition Key. * * @param partitionKey the partition Key */ public void setPartitionKey(String partitionKey) { setPartitionKeyExpression(new LiteralExpression(partitionKey)); } /** * Set partition key expression. * * @param partitionKeyExpression the partition key expression */ public void setPartitionKeyExpression(Expression partitionKeyExpression) { this.partitionKeyExpression = partitionKeyExpression; } /** * Set partition id expression. * * @param partitionIdExpression the partition id expression */ public void setPartitionIdExpression(Expression partitionIdExpression) { this.partitionIdExpression = partitionIdExpression; } /** * Set partition key expression string. 
* * @param partitionKeyExpression the partition key expression */ public void setPartitionKeyExpressionString(String partitionKeyExpression) { setPartitionKeyExpression(EXPRESSION_PARSER.parseExpression(partitionKeyExpression)); } private String toDestination(Message<?> message) { if (message.getHeaders().containsKey(AzureHeaders.NAME)) { return message.getHeaders().get(AzureHeaders.NAME, String.class); } return this.destination; } /** * To create a {@link Map} for partition id and/or key values computing from the partition id and key expression * for each message to send. * @param message to generate partition id and/or key for. * @return a {@link Map} containing partition id and/or key values. */ private Map<String, String> getPartitionFromExpression(Message<?> message) { Map<String, String> partitionMap = new HashMap<>(); evaluatePartition(message, this.partitionIdExpression) .ifPresent(id -> partitionMap.put(PARTITION_ID, id)); evaluatePartition(message, this.partitionKeyExpression) .ifPresent(key -> partitionMap.put(PARTITION_KEY, key)); return partitionMap; } private Optional<String> evaluatePartition(Message<?> message, Expression expression) { return Optional.ofNullable(expression) .map(exp -> exp.getValue(this.evaluationContext, message, String.class)); } /** * Create a {@code MutableMessage} to copy the message id and timestamp headers from the raw message, and set partition * headers extracted from partition expressions when there are no partition id and/or key headers in the raw messages. * @param rawMessage the raw message to copy from. * @param partitionHeaders a map containing the partition id and/or key headers computed from the partition expressions. * @return the {@code MutableMessage}. 
*/ private Message<?> createMutableMessage(Message<?> rawMessage, Map<String, String> partitionHeaders) { return MutableMessageBuilder.fromMessage(rawMessage) .copyHeadersIfAbsent(partitionHeaders) .build(); } private Map<String, Object> buildPropertiesMap() { Map<String, Object> properties = new HashMap<>(); properties.put("sync", sync); properties.put("sendTimeout", sendTimeoutExpression); properties.put("destination", destination); return properties; } /** * Set send call back. * * @param callback the call back */ public void setSendCallback(ListenableFutureCallback<Void> callback) { this.sendCallback = callback; } /** * Get send time out expression. * * @return sendTimeoutExpression the send time out expression */ public Expression getSendTimeoutExpression() { return sendTimeoutExpression; } /** * Set send time out expression. * * @param sendTimeoutExpression the send time out expression */ public void setSendTimeoutExpression(Expression sendTimeoutExpression) { Assert.notNull(sendTimeoutExpression, "'sendTimeoutExpression' must not be null"); this.sendTimeoutExpression = sendTimeoutExpression; LOGGER.info("DefaultMessageHandler syncTimeout becomes: {}", sendTimeoutExpression); } /** * Get send failure channel. * * @return sendFailureChannel If sendFailureChannel or sendFailureChannelName is not null, null otherwise */ protected MessageChannel getSendFailureChannel() { if (this.sendFailureChannel != null) { return this.sendFailureChannel; } else if (this.sendFailureChannelName != null) { this.sendFailureChannel = getChannelResolver().resolveDestination(this.sendFailureChannelName); return this.sendFailureChannel; } return null; } /** * Set send failure channel. * * @param sendFailureChannel the send failure channel */ public void setSendFailureChannel(MessageChannel sendFailureChannel) { this.sendFailureChannel = sendFailureChannel; } /** * Set send failure channel name. 
* * @param sendFailureChannelName the send failure channel name */ public void setSendFailureChannelName(String sendFailureChannelName) { this.sendFailureChannelName = sendFailureChannelName; } /** * Get error message strategy. * * @return the error message strategy */ protected ErrorMessageStrategy getErrorMessageStrategy() { return this.errorMessageStrategy; } /** * Set error message strategy. * * @param errorMessageStrategy the error message strategy */ public void setErrorMessageStrategy(ErrorMessageStrategy errorMessageStrategy) { Assert.notNull(errorMessageStrategy, "'errorMessageStrategy' must not be null"); this.errorMessageStrategy = errorMessageStrategy; } }
class DefaultMessageHandler extends AbstractMessageProducingHandler { private static final Logger LOGGER = LoggerFactory.getLogger(DefaultMessageHandler.class); private static final long DEFAULT_SEND_TIMEOUT = 10000; private final String destination; private final SendOperation sendOperation; private boolean sync = false; private ListenableFutureCallback<Void> sendCallback; private EvaluationContext evaluationContext; private Expression sendTimeoutExpression = new ValueExpression<>(DEFAULT_SEND_TIMEOUT); private ErrorMessageStrategy errorMessageStrategy = new DefaultErrorMessageStrategy(); private Expression partitionKeyExpression; private Expression partitionIdExpression; private MessageChannel sendFailureChannel; private String sendFailureChannelName; /** * Construct a {@link DefaultMessageHandler} with the specified destination and sendOperation. * * @param destination the destination * @param sendOperation operation for sending Messages to a destination */ public DefaultMessageHandler(String destination, @NonNull SendOperation sendOperation) { Assert.hasText(destination, "destination can't be null or empty"); this.destination = destination; this.sendOperation = sendOperation; } @Override protected void onInit() { super.onInit(); this.evaluationContext = ExpressionUtils.createStandardEvaluationContext(getBeanFactory()); LOGGER.info("Started DefaultMessageHandler with properties: {}", buildPropertiesMap()); } @Override private <T> void handleSendResponseAsync(Mono<T> mono, Message<?> message) { mono.doOnError(ex -> { if (LOGGER.isWarnEnabled()) { LOGGER.warn("{} sent failed in async mode due to {}", message, ex.getMessage()); } if (this.sendCallback != null) { this.sendCallback.onFailure(ex); } if (getSendFailureChannel() != null) { this.messagingTemplate.send(getSendFailureChannel(), getErrorMessageStrategy() .buildErrorMessage(new AzureSendFailureException(message, ex), null)); } }).doOnSuccess(t -> { if (LOGGER.isDebugEnabled()) { LOGGER.debug("{} sent 
successfully in async mode", message); } if (this.sendCallback != null) { this.sendCallback.onSuccess((Void) t); } }).subscribe(); } private <T> void waitingSendResponse(Mono<T> mono, Message<?> message) { Long sendTimeout = this.sendTimeoutExpression.getValue(this.evaluationContext, message, Long.class); if (sendTimeout == null || sendTimeout < 0) { try { mono.block(); } catch (Exception e) { throw new MessageDeliveryException(e.getMessage()); } } else { try { mono.block(Duration.of(sendTimeout, ChronoUnit.MILLIS)); if (LOGGER.isDebugEnabled()) { LOGGER.debug("{} sent successfully in sync mode", message); } } catch (Exception e) { if (e.getCause() instanceof TimeoutException) { throw new MessageTimeoutException(message, "Timeout waiting for send event hub response"); } throw new MessageDeliveryException(e.getMessage()); } } } /** * Set sync. * * @param sync the sync */ public void setSync(boolean sync) { this.sync = sync; LOGGER.info("DefaultMessageHandler sync becomes: {}", sync); } @Override public void setSendTimeout(long sendTimeout) { setSendTimeoutExpression(new ValueExpression<>(sendTimeout)); } /** * Set partition Key. * * @param partitionKey the partition Key */ public void setPartitionKey(String partitionKey) { setPartitionKeyExpression(new LiteralExpression(partitionKey)); } /** * Set partition key expression. * * @param partitionKeyExpression the partition key expression */ public void setPartitionKeyExpression(Expression partitionKeyExpression) { this.partitionKeyExpression = partitionKeyExpression; } /** * Set partition id expression. * * @param partitionIdExpression the partition id expression */ public void setPartitionIdExpression(Expression partitionIdExpression) { this.partitionIdExpression = partitionIdExpression; } /** * Set partition key expression string. 
* * @param partitionKeyExpression the partition key expression */ public void setPartitionKeyExpressionString(String partitionKeyExpression) { setPartitionKeyExpression(EXPRESSION_PARSER.parseExpression(partitionKeyExpression)); } private String toDestination(Message<?> message) { if (message.getHeaders().containsKey(AzureHeaders.NAME)) { return message.getHeaders().get(AzureHeaders.NAME, String.class); } return this.destination; } /** * To create a {@link Map} for partition id and/or key values computing from the partition id and key expression * for each message to send. * @param message to generate partition id and/or key for. * @return a {@link Map} containing partition id and/or key values. */ private Map<String, String> getPartitionFromExpression(Message<?> message) { Map<String, String> partitionMap = new HashMap<>(); evaluatePartition(message, this.partitionIdExpression) .ifPresent(id -> partitionMap.put(PARTITION_ID, id)); evaluatePartition(message, this.partitionKeyExpression) .ifPresent(key -> partitionMap.put(PARTITION_KEY, key)); return partitionMap; } private Optional<String> evaluatePartition(Message<?> message, Expression expression) { return Optional.ofNullable(expression) .map(exp -> exp.getValue(this.evaluationContext, message, String.class)); } /** * Create a {@code MutableMessage} to copy the message id and timestamp headers from the raw message, and set partition * headers extracted from partition expressions when there are no partition id and/or key headers in the raw messages. * @param rawMessage the raw message to copy from. * @param partitionHeaders a map containing the partition id and/or key headers computed from the partition expressions. * @return the {@code MutableMessage}. 
*/ private Message<?> createMutableMessage(Message<?> rawMessage, Map<String, String> partitionHeaders) { return MutableMessageBuilder.fromMessage(rawMessage) .copyHeadersIfAbsent(partitionHeaders) .build(); } private Map<String, Object> buildPropertiesMap() { Map<String, Object> properties = new HashMap<>(); properties.put("sync", sync); properties.put("sendTimeout", sendTimeoutExpression); properties.put("destination", destination); return properties; } /** * Set send call back. * * @param callback the call back */ public void setSendCallback(ListenableFutureCallback<Void> callback) { this.sendCallback = callback; } /** * Get send time out expression. * * @return sendTimeoutExpression the send time out expression */ public Expression getSendTimeoutExpression() { return sendTimeoutExpression; } /** * Set send time out expression. * * @param sendTimeoutExpression the send time out expression */ public void setSendTimeoutExpression(Expression sendTimeoutExpression) { Assert.notNull(sendTimeoutExpression, "'sendTimeoutExpression' must not be null"); this.sendTimeoutExpression = sendTimeoutExpression; LOGGER.info("DefaultMessageHandler syncTimeout becomes: {}", sendTimeoutExpression); } /** * Get send failure channel. * * @return sendFailureChannel If sendFailureChannel or sendFailureChannelName is not null, null otherwise */ protected MessageChannel getSendFailureChannel() { if (this.sendFailureChannel != null) { return this.sendFailureChannel; } else if (this.sendFailureChannelName != null) { this.sendFailureChannel = getChannelResolver().resolveDestination(this.sendFailureChannelName); return this.sendFailureChannel; } return null; } /** * Set send failure channel. * * @param sendFailureChannel the send failure channel */ public void setSendFailureChannel(MessageChannel sendFailureChannel) { this.sendFailureChannel = sendFailureChannel; } /** * Set send failure channel name. 
* * @param sendFailureChannelName the send failure channel name */ public void setSendFailureChannelName(String sendFailureChannelName) { this.sendFailureChannelName = sendFailureChannelName; } /** * Get error message strategy. * * @return the error message strategy */ protected ErrorMessageStrategy getErrorMessageStrategy() { return this.errorMessageStrategy; } /** * Set error message strategy. * * @param errorMessageStrategy the error message strategy */ public void setErrorMessageStrategy(ErrorMessageStrategy errorMessageStrategy) { Assert.notNull(errorMessageStrategy, "'errorMessageStrategy' must not be null"); this.errorMessageStrategy = errorMessageStrategy; } }
I am not sure about this, but the "spotbug" hints a low risk of defect. Seems just remove the logger in this file.
protected void doStop() { if (this.delegate != null) { this.delegate.stop(); } }
if (this.delegate != null) {
protected void doStop() { if (this.delegate != null) { this.delegate.stop(); } }
class EventHubsMessageListenerContainer extends AbstractMessageListenerContainer { private final EventHubsProcessorFactory processorFactory; private final EventHubsContainerProperties containerProperties; private EventHubsErrorHandler errorHandler; private EventProcessorClient delegate; /** * Create an instance using the supplied processor factory and container properties. * * @param processorFactory the processor factory. * @param containerProperties the container properties */ public EventHubsMessageListenerContainer(EventHubsProcessorFactory processorFactory, EventHubsContainerProperties containerProperties) { this.processorFactory = processorFactory; this.containerProperties = containerProperties == null ? new EventHubsContainerProperties() : containerProperties; } @Override protected void doStart() { String eventHubName = this.containerProperties.getEventHubName(); String consumerGroup = this.containerProperties.getConsumerGroup(); if (this.errorHandler != null) { this.containerProperties.setErrorHandler(this.errorHandler); } this.delegate = this.processorFactory.createProcessor(eventHubName, consumerGroup, this.containerProperties); this.delegate.start(); } @Override @Override public void setupMessageListener(MessageListener<?> messageListener) { this.containerProperties.setMessageListener(messageListener); } @Override public EventHubsContainerProperties getContainerProperties() { return containerProperties; } /** * Set the error handler to call when the listener throws an exception. * @param errorHandler the error handler. */ public void setErrorHandler(EventHubsErrorHandler errorHandler) { this.errorHandler = errorHandler; } }
class EventHubsMessageListenerContainer extends AbstractMessageListenerContainer { private final EventHubsProcessorFactory processorFactory; private final EventHubsContainerProperties containerProperties; private EventHubsErrorHandler errorHandler; private EventProcessorClient delegate; /** * Create an instance using the supplied processor factory and container properties. * * @param processorFactory the processor factory. * @param containerProperties the container properties */ public EventHubsMessageListenerContainer(EventHubsProcessorFactory processorFactory, EventHubsContainerProperties containerProperties) { this.processorFactory = processorFactory; this.containerProperties = containerProperties == null ? new EventHubsContainerProperties() : containerProperties; } @Override protected void doStart() { String eventHubName = this.containerProperties.getEventHubName(); String consumerGroup = this.containerProperties.getConsumerGroup(); if (this.errorHandler != null) { this.containerProperties.setErrorHandler(this.errorHandler); } this.delegate = this.processorFactory.createProcessor(eventHubName, consumerGroup, this.containerProperties); this.delegate.start(); } @Override @Override public void setupMessageListener(MessageListener<?> messageListener) { this.containerProperties.setMessageListener(messageListener); } @Override public EventHubsContainerProperties getContainerProperties() { return containerProperties; } /** * Set the error handler to call when the listener throws an exception. * @param errorHandler the error handler. */ public void setErrorHandler(EventHubsErrorHandler errorHandler) { this.errorHandler = errorHandler; } }
same as above.
protected void doStop() { if (this.delegate != null) { this.delegate.stop(); } }
if (this.delegate != null) {
protected void doStop() { if (this.delegate != null) { this.delegate.stop(); } }
class ServiceBusMessageListenerContainer extends AbstractMessageListenerContainer { private final ServiceBusProcessorFactory processorFactory; private final ServiceBusContainerProperties containerProperties; private ServiceBusErrorHandler errorHandler; private ServiceBusProcessorClient delegate; /** * Create an instance using the supplied processor factory and container properties. * * @param processorFactory the processor factory. * @param containerProperties the container properties. */ public ServiceBusMessageListenerContainer(ServiceBusProcessorFactory processorFactory, ServiceBusContainerProperties containerProperties) { this.processorFactory = processorFactory; this.containerProperties = containerProperties == null ? new ServiceBusContainerProperties() : containerProperties; } @Override protected void doStart() { String entityName = containerProperties.getEntityName(); String subscriptionName = containerProperties.getSubscriptionName(); if (this.errorHandler != null) { this.containerProperties.setErrorHandler(errorHandler); } if (StringUtils.hasText(subscriptionName)) { this.delegate = this.processorFactory.createProcessor(entityName, subscriptionName, containerProperties); } else { this.delegate = this.processorFactory.createProcessor(entityName, containerProperties); } this.delegate.start(); } @Override @Override public void setupMessageListener(MessageListener<?> messageListener) { this.containerProperties.setMessageListener(messageListener); } @Override public ServiceBusContainerProperties getContainerProperties() { return containerProperties; } /** * Set the error handler to call when the listener throws an exception. * @param errorHandler the error handler. */ public void setErrorHandler(ServiceBusErrorHandler errorHandler) { this.errorHandler = errorHandler; } }
class ServiceBusMessageListenerContainer extends AbstractMessageListenerContainer { private final ServiceBusProcessorFactory processorFactory; private final ServiceBusContainerProperties containerProperties; private ServiceBusErrorHandler errorHandler; private ServiceBusProcessorClient delegate; /** * Create an instance using the supplied processor factory and container properties. * * @param processorFactory the processor factory. * @param containerProperties the container properties. */ public ServiceBusMessageListenerContainer(ServiceBusProcessorFactory processorFactory, ServiceBusContainerProperties containerProperties) { this.processorFactory = processorFactory; this.containerProperties = containerProperties == null ? new ServiceBusContainerProperties() : containerProperties; } @Override protected void doStart() { String entityName = containerProperties.getEntityName(); String subscriptionName = containerProperties.getSubscriptionName(); if (this.errorHandler != null) { this.containerProperties.setErrorHandler(errorHandler); } if (StringUtils.hasText(subscriptionName)) { this.delegate = this.processorFactory.createProcessor(entityName, subscriptionName, containerProperties); } else { this.delegate = this.processorFactory.createProcessor(entityName, containerProperties); } this.delegate.start(); } @Override @Override public void setupMessageListener(MessageListener<?> messageListener) { this.containerProperties.setMessageListener(messageListener); } @Override public ServiceBusContainerProperties getContainerProperties() { return containerProperties; } /** * Set the error handler to call when the listener throws an exception. * @param errorHandler the error handler. */ public void setErrorHandler(ServiceBusErrorHandler errorHandler) { this.errorHandler = errorHandler; } }
I don't think this check is required, in this context, it does nothing
private boolean springBootVersionMatches() { for (String acceptedVersion : acceptedVersions) { try { if (this.matchSpringBootVersionFromManifest(acceptedVersion)) { return true; } } catch (FileNotFoundException e) { String versionString = stripWildCardFromVersion(acceptedVersion); String fullyQualifiedClassName = this.supportedVersions.get(versionString); if (classNameResolver.resolve(fullyQualifiedClassName)) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Predicate for Spring Boot Version of [{}] was matched", versionString); } return true; } } } return false; }
if (LOGGER.isDebugEnabled()) {
private boolean springBootVersionMatches() { for (String acceptedVersion : acceptedVersions) { try { if (this.matchSpringBootVersionFromManifest(acceptedVersion)) { return true; } } catch (FileNotFoundException e) { String versionString = stripWildCardFromVersion(acceptedVersion); String fullyQualifiedClassName = this.supportedVersions.get(versionString); if (classNameResolver.resolve(fullyQualifiedClassName)) { LOGGER.debug("Predicate for Spring Boot Version of [{}] was matched", versionString); return true; } } } return false; }
class AzureSpringBootVersionVerifier { private static final Logger LOGGER = LoggerFactory.getLogger(AzureSpringBootVersionVerifier.class); static final String SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_5 = "org.springframework.boot.context.properties.bind.Bindable.BindRestriction"; static final String SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_6 = "org.springframework.boot.autoconfigure.data.redis.ClientResourcesBuilderCustomizer"; /** * Versions supported by Spring Cloud Azure, for present is [2.5, 2.6]. Update this value if needed. */ private final Map<String, String> supportedVersions = new HashMap<>(); /** * Versionsspecified in the configuration or environment. */ private final List<String> acceptedVersions; private final ClassNameResolverPredicate classNameResolver; public AzureSpringBootVersionVerifier(List<String> acceptedVersions, ClassNameResolverPredicate classNameResolver) { this.acceptedVersions = acceptedVersions; this.classNameResolver = classNameResolver; initDefaultSupportedBootVersionCheckMeta(); } /** * Init default supported Spring Boot Version compatibility check meta data. */ private void initDefaultSupportedBootVersionCheckMeta() { supportedVersions.put("2.5", SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_5); supportedVersions.put("2.6", SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_6); } /** * Verify the current spring-boot version * * @return Verification result of spring-boot version * @throws AzureCompatibilityNotMetException thrown if using an unsupported spring-boot version */ public VerificationResult verify() { if (this.springBootVersionMatches()) { return VerificationResult.compatible(); } else { List<VerificationResult> errors = new ArrayList<>(Collections.singleton(VerificationResult.notCompatible(this.errorDescription(), this.action()))); throw new AzureCompatibilityNotMetException(errors); } } private String errorDescription() { String versionFromManifest = this.getVersionFromManifest(); return StringUtils.hasText(versionFromManifest) ? 
String.format("Spring Boot [%s] is not compatible with this" + " Spring Cloud Azure release", versionFromManifest) : "Spring Boot is not compatible with this " + "Spring Cloud Azure release"; } private String action() { return String.format("Change Spring Boot version to one of the following versions %s.%n" + "You can find the latest Spring Boot versions here [%s]. %n" + "If you want to learn more about the Spring Cloud Azure compatibility, " + "you can visit this page [%s] and check the [Which Version of Spring Cloud Azure Should I Use] section.%n" + "If you want to disable this check, " + "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https: "https: } String getVersionFromManifest() { return SpringBootVersion.getVersion(); } private boolean matchSpringBootVersionFromManifest(String acceptedVersion) throws FileNotFoundException { String version = this.getVersionFromManifest(); LOGGER.debug("Version found in Boot manifest [{}]", version); if (!StringUtils.hasText(version)) { LOGGER.info("Cannot check Boot version from manifest"); throw new FileNotFoundException("Spring Boot version not found"); } else { return version.startsWith(stripWildCardFromVersion(acceptedVersion)); } } private static String stripWildCardFromVersion(String version) { return version.endsWith(".x") ? version.substring(0, version.indexOf(".x")) : version; } }
class AzureSpringBootVersionVerifier { private static final Logger LOGGER = LoggerFactory.getLogger(AzureSpringBootVersionVerifier.class); static final String SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_5 = "org.springframework.boot.context.properties.bind.Bindable.BindRestriction"; static final String SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_6 = "org.springframework.boot.autoconfigure.data.redis.ClientResourcesBuilderCustomizer"; /** * Versions supported by Spring Cloud Azure, for present is [2.5, 2.6]. Update this value if needed. */ private final Map<String, String> supportedVersions = new HashMap<>(); /** * Versionsspecified in the configuration or environment. */ private final List<String> acceptedVersions; private final ClassNameResolverPredicate classNameResolver; public AzureSpringBootVersionVerifier(List<String> acceptedVersions, ClassNameResolverPredicate classNameResolver) { this.acceptedVersions = acceptedVersions; this.classNameResolver = classNameResolver; initDefaultSupportedBootVersionCheckMeta(); } /** * Init default supported Spring Boot Version compatibility check meta data. */ private void initDefaultSupportedBootVersionCheckMeta() { supportedVersions.put("2.5", SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_5); supportedVersions.put("2.6", SPRINGBOOT_CONDITIONAL_CLASS_NAME_OF_2_6); } /** * Verify the current spring-boot version * * @return Verification result of spring-boot version * @throws AzureCompatibilityNotMetException thrown if using an unsupported spring-boot version */ public VerificationResult verify() { if (this.springBootVersionMatches()) { return VerificationResult.compatible(); } else { List<VerificationResult> errors = new ArrayList<>(Collections.singleton(VerificationResult.notCompatible(this.errorDescription(), this.action()))); throw new AzureCompatibilityNotMetException(errors); } } private String errorDescription() { String versionFromManifest = this.getVersionFromManifest(); return StringUtils.hasText(versionFromManifest) ? 
String.format("Spring Boot [%s] is not compatible with this" + " Spring Cloud Azure release", versionFromManifest) : "Spring Boot is not compatible with this " + "Spring Cloud Azure release"; } private String action() { return String.format("Change Spring Boot version to one of the following versions %s.%n" + "You can find the latest Spring Boot versions here [%s]. %n" + "If you want to learn more about the Spring Cloud Azure compatibility, " + "you can visit this page [%s] and check the [Which Version of Spring Cloud Azure Should I Use] section.%n" + "If you want to disable this check, " + "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https: "https: } String getVersionFromManifest() { return SpringBootVersion.getVersion(); } private boolean matchSpringBootVersionFromManifest(String acceptedVersion) throws FileNotFoundException { String version = this.getVersionFromManifest(); LOGGER.debug("Version found in Boot manifest [{}]", version); if (!StringUtils.hasText(version)) { LOGGER.info("Cannot check Boot version from manifest"); throw new FileNotFoundException("Spring Boot version not found"); } else { return version.startsWith(stripWildCardFromVersion(acceptedVersion)); } } private static String stripWildCardFromVersion(String version) { return version.endsWith(".x") ? version.substring(0, version.indexOf(".x")) : version; } }
do we need this `if (LOGGER.isTraceEnabled())`?
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AopInfrastructureBean || bean instanceof MessageListenerContainerFactory || bean instanceof AzureListenerEndpointRegistry) { return bean; } Class<?> targetClass = AopProxyUtils.ultimateTargetClass(bean); if (!this.nonAnnotatedClasses.contains(targetClass)) { Map<Method, Set<T>> annotatedMethods = MethodIntrospector.selectMethods(targetClass, (MethodIntrospector.MetadataLookup<Set<T>>) method -> { Set<T> listenerMethods = findListenerMethods(method); return (!listenerMethods.isEmpty() ? listenerMethods : null); }); if (annotatedMethods.isEmpty()) { this.nonAnnotatedClasses.add(targetClass); if (LOGGER.isTraceEnabled()) { LOGGER.trace("No @AzureMessageListener annotations found on bean type: {}", targetClass); } } else { annotatedMethods.forEach((method, listeners) -> listeners .forEach(listener -> processAzureListener(listener, method, bean))); if (LOGGER.isDebugEnabled()) { LOGGER.debug("{} @AzureMessageListener methods processed on bean '{}': {}", annotatedMethods.size(), beanName, annotatedMethods); } } } return bean; }
if (LOGGER.isTraceEnabled()) {
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AopInfrastructureBean || bean instanceof MessageListenerContainerFactory || bean instanceof AzureListenerEndpointRegistry) { return bean; } Class<?> targetClass = AopProxyUtils.ultimateTargetClass(bean); if (!this.nonAnnotatedClasses.contains(targetClass)) { Map<Method, Set<T>> annotatedMethods = MethodIntrospector.selectMethods(targetClass, (MethodIntrospector.MetadataLookup<Set<T>>) method -> { Set<T> listenerMethods = findListenerMethods(method); return (!listenerMethods.isEmpty() ? listenerMethods : null); }); if (annotatedMethods.isEmpty()) { this.nonAnnotatedClasses.add(targetClass); LOGGER.trace("No @AzureMessageListener annotations found on bean type: {}", targetClass); } else { annotatedMethods.forEach((method, listeners) -> listeners .forEach(listener -> processAzureListener(listener, method, bean))); LOGGER.debug("{} @AzureMessageListener methods processed on bean '{}': {}", annotatedMethods.size(), beanName, annotatedMethods); } } return bean; }
class AzureListenerAnnotationBeanPostProcessorAdapter<T> implements MergedBeanDefinitionPostProcessor, Ordered, BeanFactoryAware, SmartInitializingSingleton { private static final Logger LOGGER = LoggerFactory.getLogger(AzureListenerAnnotationBeanPostProcessorAdapter.class); public static final String DEFAULT_AZURE_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME = "azureListenerEndpointRegistry"; /** * The bean name of the default {@link MessageListenerContainerFactory}. */ protected String containerFactoryBeanName; private final MessageHandlerMethodFactoryAdapter messageHandlerMethodFactory = new MessageHandlerMethodFactoryAdapter(); protected final AtomicInteger counter = new AtomicInteger(); private final Set<Class<?>> nonAnnotatedClasses = Collections.newSetFromMap(new ConcurrentHashMap<>(64)); private final AzureListenerEndpointRegistrar registrar = new AzureListenerEndpointRegistrar(); @Nullable private AzureListenerEndpointRegistry endpointRegistry; @Nullable private BeanFactory beanFactory; @Nullable private StringValueResolver embeddedValueResolver; /** * Set the container factory bean name. * @param containerFactoryBeanName the container factory bean name. 
*/ public void setContainerFactoryBeanName(String containerFactoryBeanName) { this.containerFactoryBeanName = containerFactoryBeanName; } @Override public int getOrder() { return LOWEST_PRECEDENCE; } /** * Making a {@link BeanFactory} available is optional; if not set, * {@link AzureListenerConfigurer} beans won't get autodetected and an */ @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; if (beanFactory instanceof ConfigurableBeanFactory) { this.embeddedValueResolver = new EmbeddedValueResolver((ConfigurableBeanFactory) beanFactory); } this.registrar.setBeanFactory(beanFactory); } @Override public void afterSingletonsInstantiated() { this.nonAnnotatedClasses.clear(); if (this.beanFactory instanceof ListableBeanFactory) { Map<String, AzureListenerConfigurer> beans = ((ListableBeanFactory) this.beanFactory).getBeansOfType(AzureListenerConfigurer.class); List<AzureListenerConfigurer> configurers = new ArrayList<>(beans.values()); AnnotationAwareOrderComparator.sort(configurers); for (AzureListenerConfigurer configurer : configurers) { configurer.configureAzureListeners(this.registrar); } } if (this.containerFactoryBeanName != null) { this.registrar.setContainerFactoryBeanName(this.containerFactoryBeanName); } if (this.registrar.getEndpointRegistry() == null) { if (this.endpointRegistry == null) { Assert.state(this.beanFactory != null, "BeanFactory must be set to find endpoint registry by bean name"); this.endpointRegistry = this.beanFactory.getBean(DEFAULT_AZURE_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME, AzureListenerEndpointRegistry.class); } this.registrar.setEndpointRegistry(this.endpointRegistry); } MessageHandlerMethodFactory handlerMethodFactory = this.registrar.getMessageHandlerMethodFactory(); if (handlerMethodFactory != null) { this.messageHandlerMethodFactory.setMessageHandlerMethodFactory(handlerMethodFactory); } this.registrar.afterPropertiesSet(); } @Override public void 
postProcessMergedBeanDefinition(RootBeanDefinition beanDefinition, Class<?> beanType, String beanName) { } /** * @throws BeansException in case of errors */ @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { return bean; } /** * @throws BeansException in case of errors */ @Override /** * Process the given {@link T} annotation on the given method, * registering a corresponding endpoint for the given bean instance. * * @param listenerAnnotation the annotation to process * @param mostSpecificMethod the annotated method * @param bean the instance to invoke the method on * @see AzureListenerEndpointRegistrar * @throws BeanInitializationException If no ListenerContainerFactory could be found. */ private void processAzureListener(T listenerAnnotation, Method mostSpecificMethod, Object bean) { Method invocableMethod = AopUtils.selectInvocableMethod(mostSpecificMethod, bean.getClass()); AzureListenerEndpoint endpoint = createAndConfigureMethodListenerEndpoint(listenerAnnotation, bean, invocableMethod, beanFactory, messageHandlerMethodFactory); MessageListenerContainerFactory<?> factory = null; String containerFactoryBeanNameResolved = resolve(getContainerFactoryBeanName(listenerAnnotation)); if (StringUtils.hasText(containerFactoryBeanNameResolved)) { Assert.state(this.beanFactory != null, "BeanFactory must be set to obtain container factory by bean name"); try { factory = this.beanFactory.getBean(containerFactoryBeanNameResolved, MessageListenerContainerFactory.class); } catch (NoSuchBeanDefinitionException ex) { throw new BeanInitializationException( "Could not register Azure listener endpoint on [" + mostSpecificMethod + "], no " + MessageListenerContainerFactory.class.getSimpleName() + " with id '" + containerFactoryBeanNameResolved + "' was found in the application context", ex); } } this.registrar.registerEndpoint(endpoint, factory); } protected abstract Set<T> findListenerMethods(Method method); /** * 
Instantiate an empty {@link AzureListenerEndpoint} and perform further * configuration with provided parameters in {@link * * @param listenerAnnotation the listener annotation * @param bean the object instance that should manage this endpoint. * @param method the method to invoke to process a message managed by this endpoint. * @param beanFactory the Spring bean factory to use to resolve expressions * @param messageHandlerMethodFactory the {@link MessageHandlerMethodFactory} to use to build the * {@link InvocableHandlerMethod} responsible to manage the invocation of * this endpoint. * * @return an {@link AzureListenerEndpoint} implementation. */ protected abstract AzureListenerEndpoint createAndConfigureMethodListenerEndpoint( T listenerAnnotation, Object bean, Method method, BeanFactory beanFactory, MessageHandlerMethodFactory messageHandlerMethodFactory); protected abstract String getEndpointId(T listenerAnnotation); protected abstract String getContainerFactoryBeanName(T listenerAnnotation); protected abstract Class<T> getListenerType(); @Nullable protected String resolve(String value) { return (this.embeddedValueResolver != null ? this.embeddedValueResolver.resolveStringValue(value) : value); } /** * A {@link MessageHandlerMethodFactory} adapter that offers a configurable underlying * instance to use. Useful if the factory to use is determined once the endpoints * have been registered but not created yet. 
* * @see AzureListenerEndpointRegistrar */ private class MessageHandlerMethodFactoryAdapter implements MessageHandlerMethodFactory { @Nullable private MessageHandlerMethodFactory messageHandlerMethodFactory; @Override public InvocableHandlerMethod createInvocableHandlerMethod(Object bean, Method method) { return getMessageHandlerMethodFactory().createInvocableHandlerMethod(bean, method); } private MessageHandlerMethodFactory getMessageHandlerMethodFactory() { if (this.messageHandlerMethodFactory == null) { this.messageHandlerMethodFactory = createDefaultAzureHandlerMethodFactory(); } return this.messageHandlerMethodFactory; } public void setMessageHandlerMethodFactory(MessageHandlerMethodFactory messageHandlerMethodFactory) { this.messageHandlerMethodFactory = messageHandlerMethodFactory; } private MessageHandlerMethodFactory createDefaultAzureHandlerMethodFactory() { DefaultMessageHandlerMethodFactory defaultFactory = new DefaultMessageHandlerMethodFactory(); if (beanFactory != null) { defaultFactory.setBeanFactory(beanFactory); } defaultFactory.afterPropertiesSet(); return defaultFactory; } } /** * Get the default bean name for an implementation class of {@link AzureListenerAnnotationBeanPostProcessorAdapter}. * @return the default bean name for the implementation class. */ public abstract String getDefaultAzureListenerAnnotationBeanPostProcessorBeanName(); }
class AzureListenerAnnotationBeanPostProcessorAdapter<T> implements MergedBeanDefinitionPostProcessor, Ordered, BeanFactoryAware, SmartInitializingSingleton { private static final Logger LOGGER = LoggerFactory.getLogger(AzureListenerAnnotationBeanPostProcessorAdapter.class); public static final String DEFAULT_AZURE_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME = "azureListenerEndpointRegistry"; /** * The bean name of the default {@link MessageListenerContainerFactory}. */ protected String containerFactoryBeanName; private final MessageHandlerMethodFactoryAdapter messageHandlerMethodFactory = new MessageHandlerMethodFactoryAdapter(); protected final AtomicInteger counter = new AtomicInteger(); private final Set<Class<?>> nonAnnotatedClasses = Collections.newSetFromMap(new ConcurrentHashMap<>(64)); private final AzureListenerEndpointRegistrar registrar = new AzureListenerEndpointRegistrar(); @Nullable private AzureListenerEndpointRegistry endpointRegistry; @Nullable private BeanFactory beanFactory; @Nullable private StringValueResolver embeddedValueResolver; /** * Set the container factory bean name. * @param containerFactoryBeanName the container factory bean name. 
*/ public void setContainerFactoryBeanName(String containerFactoryBeanName) { this.containerFactoryBeanName = containerFactoryBeanName; } @Override public int getOrder() { return LOWEST_PRECEDENCE; } /** * Making a {@link BeanFactory} available is optional; if not set, * {@link AzureListenerConfigurer} beans won't get autodetected and an */ @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; if (beanFactory instanceof ConfigurableBeanFactory) { this.embeddedValueResolver = new EmbeddedValueResolver((ConfigurableBeanFactory) beanFactory); } this.registrar.setBeanFactory(beanFactory); } @Override public void afterSingletonsInstantiated() { this.nonAnnotatedClasses.clear(); if (this.beanFactory instanceof ListableBeanFactory) { Map<String, AzureListenerConfigurer> beans = ((ListableBeanFactory) this.beanFactory).getBeansOfType(AzureListenerConfigurer.class); List<AzureListenerConfigurer> configurers = new ArrayList<>(beans.values()); AnnotationAwareOrderComparator.sort(configurers); for (AzureListenerConfigurer configurer : configurers) { configurer.configureAzureListeners(this.registrar); } } if (this.containerFactoryBeanName != null) { this.registrar.setContainerFactoryBeanName(this.containerFactoryBeanName); } if (this.registrar.getEndpointRegistry() == null) { if (this.endpointRegistry == null) { Assert.state(this.beanFactory != null, "BeanFactory must be set to find endpoint registry by bean name"); this.endpointRegistry = this.beanFactory.getBean(DEFAULT_AZURE_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME, AzureListenerEndpointRegistry.class); } this.registrar.setEndpointRegistry(this.endpointRegistry); } MessageHandlerMethodFactory handlerMethodFactory = this.registrar.getMessageHandlerMethodFactory(); if (handlerMethodFactory != null) { this.messageHandlerMethodFactory.setMessageHandlerMethodFactory(handlerMethodFactory); } this.registrar.afterPropertiesSet(); } @Override public void 
postProcessMergedBeanDefinition(RootBeanDefinition beanDefinition, Class<?> beanType, String beanName) { } /** * @throws BeansException in case of errors */ @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { return bean; } /** * @throws BeansException in case of errors */ @Override /** * Process the given {@link T} annotation on the given method, * registering a corresponding endpoint for the given bean instance. * * @param listenerAnnotation the annotation to process * @param mostSpecificMethod the annotated method * @param bean the instance to invoke the method on * @see AzureListenerEndpointRegistrar * @throws BeanInitializationException If no ListenerContainerFactory could be found. */ private void processAzureListener(T listenerAnnotation, Method mostSpecificMethod, Object bean) { Method invocableMethod = AopUtils.selectInvocableMethod(mostSpecificMethod, bean.getClass()); AzureListenerEndpoint endpoint = createAndConfigureMethodListenerEndpoint(listenerAnnotation, bean, invocableMethod, beanFactory, messageHandlerMethodFactory); MessageListenerContainerFactory<?> factory = null; String containerFactoryBeanNameResolved = resolve(getContainerFactoryBeanName(listenerAnnotation)); if (StringUtils.hasText(containerFactoryBeanNameResolved)) { Assert.state(this.beanFactory != null, "BeanFactory must be set to obtain container factory by bean name"); try { factory = this.beanFactory.getBean(containerFactoryBeanNameResolved, MessageListenerContainerFactory.class); } catch (NoSuchBeanDefinitionException ex) { throw new BeanInitializationException( "Could not register Azure listener endpoint on [" + mostSpecificMethod + "], no " + MessageListenerContainerFactory.class.getSimpleName() + " with id '" + containerFactoryBeanNameResolved + "' was found in the application context", ex); } } this.registrar.registerEndpoint(endpoint, factory); } protected abstract Set<T> findListenerMethods(Method method); /** * 
Instantiate an empty {@link AzureListenerEndpoint} and perform further * configuration with provided parameters in {@link * * @param listenerAnnotation the listener annotation * @param bean the object instance that should manage this endpoint. * @param method the method to invoke to process a message managed by this endpoint. * @param beanFactory the Spring bean factory to use to resolve expressions * @param messageHandlerMethodFactory the {@link MessageHandlerMethodFactory} to use to build the * {@link InvocableHandlerMethod} responsible to manage the invocation of * this endpoint. * * @return an {@link AzureListenerEndpoint} implementation. */ protected abstract AzureListenerEndpoint createAndConfigureMethodListenerEndpoint( T listenerAnnotation, Object bean, Method method, BeanFactory beanFactory, MessageHandlerMethodFactory messageHandlerMethodFactory); protected abstract String getEndpointId(T listenerAnnotation); protected abstract String getContainerFactoryBeanName(T listenerAnnotation); protected abstract Class<T> getListenerType(); @Nullable protected String resolve(String value) { return (this.embeddedValueResolver != null ? this.embeddedValueResolver.resolveStringValue(value) : value); } /** * A {@link MessageHandlerMethodFactory} adapter that offers a configurable underlying * instance to use. Useful if the factory to use is determined once the endpoints * have been registered but not created yet. 
* * @see AzureListenerEndpointRegistrar */ private class MessageHandlerMethodFactoryAdapter implements MessageHandlerMethodFactory { @Nullable private MessageHandlerMethodFactory messageHandlerMethodFactory; @Override public InvocableHandlerMethod createInvocableHandlerMethod(Object bean, Method method) { return getMessageHandlerMethodFactory().createInvocableHandlerMethod(bean, method); } private MessageHandlerMethodFactory getMessageHandlerMethodFactory() { if (this.messageHandlerMethodFactory == null) { this.messageHandlerMethodFactory = createDefaultAzureHandlerMethodFactory(); } return this.messageHandlerMethodFactory; } public void setMessageHandlerMethodFactory(MessageHandlerMethodFactory messageHandlerMethodFactory) { this.messageHandlerMethodFactory = messageHandlerMethodFactory; } private MessageHandlerMethodFactory createDefaultAzureHandlerMethodFactory() { DefaultMessageHandlerMethodFactory defaultFactory = new DefaultMessageHandlerMethodFactory(); if (beanFactory != null) { defaultFactory.setBeanFactory(beanFactory); } defaultFactory.afterPropertiesSet(); return defaultFactory; } } /** * Get the default bean name for an implementation class of {@link AzureListenerAnnotationBeanPostProcessorAdapter}. * @return the default bean name for the implementation class. */ public abstract String getDefaultAzureListenerAnnotationBeanPostProcessorBeanName(); }
should we return immediately here if `get` returns null?
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { return Mono.defer(() -> { Map<String, String> challengeAttributes = extractChallengeAttributes(response.getHeaderValue(WWW_AUTHENTICATE), BEARER_TOKEN_PREFIX); String authorizationUriString = challengeAttributes.get("authorization_uri"); final URI authorizationUri; try { authorizationUri = new URI(authorizationUriString); } catch (URISyntaxException e) { return Mono.just(false); } this.tenantId = authorizationUri.getPath().split("/")[1]; TokenRequestContext tokenRequestContext = new TokenRequestContext() .addScopes(this.scopes) .setTenantId(this.tenantId); return setAuthorizationHeader(context, tokenRequestContext) .then(Mono.just(true)); }); }
String authorizationUriString = challengeAttributes.get("authorization_uri");
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { return Mono.defer(() -> { Map<String, String> challengeAttributes = extractChallengeAttributes(response.getHeaderValue(WWW_AUTHENTICATE), BEARER_TOKEN_PREFIX); String authorizationUriString = challengeAttributes.get("authorization_uri"); final URI authorizationUri; try { authorizationUri = new URI(authorizationUriString); } catch (URISyntaxException e) { return Mono.just(false); } this.tenantId = authorizationUri.getPath().split("/")[1]; TokenRequestContext tokenRequestContext = new TokenRequestContext() .addScopes(this.scopes) .setTenantId(this.tenantId); return setAuthorizationHeader(context, tokenRequestContext) .then(Mono.just(true)); }); }
class TableBearerTokenChallengeAuthorizationPolicy extends BearerTokenAuthenticationPolicy { private static final String BEARER_TOKEN_PREFIX = "Bearer "; private static final String WWW_AUTHENTICATE = "WWW-Authenticate"; private String[] scopes; private String tenantId; private boolean enabledTenantDiscovery; /** * Creates a {@link TableBearerTokenChallengeAuthorizationPolicy}. * * @param credential The token credential to authenticate the request. */ public TableBearerTokenChallengeAuthorizationPolicy(TokenCredential credential, boolean enabledTenantDiscovery, String... scopes) { super(credential, scopes); this.scopes = scopes; this.enabledTenantDiscovery = enabledTenantDiscovery; } /** * Extracts attributes off the bearer challenge in the authentication header. * * @param authenticateHeader The authentication header containing the challenge. * @param authChallengePrefix The authentication challenge name. * * @return A challenge attributes map. */ private static Map<String, String> extractChallengeAttributes(String authenticateHeader, String authChallengePrefix) { if (!isBearerChallenge(authenticateHeader, authChallengePrefix)) { return Collections.emptyMap(); } authenticateHeader = authenticateHeader.toLowerCase(Locale.ROOT).replace(authChallengePrefix.toLowerCase(Locale.ROOT), ""); String[] attributes = authenticateHeader.split(", "); Map<String, String> attributeMap = new HashMap<>(); for (String pair : attributes) { String[] keyValue = pair.split("="); attributeMap.put(keyValue[0].replaceAll("\"", ""), keyValue[1].replaceAll("\"", "")); } return attributeMap; } /** * Verifies whether a challenge is bearer or not. * * @param authenticateHeader The authentication header containing all the challenges. * @param authChallengePrefix The authentication challenge name. * * @return A boolean indicating if the challenge is a bearer challenge or not. 
*/ private static boolean isBearerChallenge(String authenticateHeader, String authChallengePrefix) { return (!CoreUtils.isNullOrEmpty(authenticateHeader) && authenticateHeader.toLowerCase(Locale.ROOT).startsWith(authChallengePrefix.toLowerCase(Locale.ROOT))); } @Override public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { if (this.tenantId != null || !enabledTenantDiscovery) { TokenRequestContext tokenRequestContext = new TokenRequestContext() .addScopes(this.scopes) .setTenantId(this.tenantId); return setAuthorizationHeader(context, tokenRequestContext); } return Mono.empty(); }); } @Override }
class TableBearerTokenChallengeAuthorizationPolicy extends BearerTokenAuthenticationPolicy { private static final String BEARER_TOKEN_PREFIX = "Bearer "; private static final String WWW_AUTHENTICATE = "WWW-Authenticate"; private String[] scopes; private volatile String tenantId; private boolean enableTenantDiscovery; /** * Creates a {@link TableBearerTokenChallengeAuthorizationPolicy}. * * @param credential The token credential to authenticate the request. */ public TableBearerTokenChallengeAuthorizationPolicy(TokenCredential credential, boolean enableTenantDiscovery, String... scopes) { super(credential, scopes); this.scopes = scopes; this.enableTenantDiscovery = enableTenantDiscovery; } /** * Extracts attributes off the bearer challenge in the authentication header. * * @param authenticateHeader The authentication header containing the challenge. * @param authChallengePrefix The authentication challenge name. * * @return A challenge attributes map. */ private static Map<String, String> extractChallengeAttributes(String authenticateHeader, String authChallengePrefix) { if (!isBearerChallenge(authenticateHeader, authChallengePrefix)) { return Collections.emptyMap(); } authenticateHeader = authenticateHeader.toLowerCase(Locale.ROOT).replace(authChallengePrefix.toLowerCase(Locale.ROOT), ""); String[] attributes = authenticateHeader.split(" "); Map<String, String> attributeMap = new HashMap<>(); for (String pair : attributes) { String[] keyValue = pair.split("="); attributeMap.put(keyValue[0].replaceAll("\"", ""), keyValue[1].replaceAll("\"", "")); } return attributeMap; } /** * Verifies whether a challenge is bearer or not. * * @param authenticateHeader The authentication header containing all the challenges. * @param authChallengePrefix The authentication challenge name. * * @return A boolean indicating if the challenge is a bearer challenge or not. 
*/ private static boolean isBearerChallenge(String authenticateHeader, String authChallengePrefix) { return (!CoreUtils.isNullOrEmpty(authenticateHeader) && authenticateHeader.toLowerCase(Locale.ROOT).startsWith(authChallengePrefix.toLowerCase(Locale.ROOT))); } @Override public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { if (this.tenantId != null || !enableTenantDiscovery) { TokenRequestContext tokenRequestContext = new TokenRequestContext() .addScopes(this.scopes) .setTenantId(this.tenantId); return setAuthorizationHeader(context, tokenRequestContext); } return Mono.empty(); }); } @Override }
Any reason for using this overload with offset and count over just `new String(byteBuffer.array())`?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final HttpRequest request = context.getHttpRequest(); final HttpHeader contentType = request.getHeaders().get(Constants.CONTENT_TYPE); StringBuilder bodyStringBuilder = new StringBuilder(); if (TracerProxy.isTracingEnabled() && contentType != null && Constants.CLOUD_EVENT_CONTENT_TYPE.equals(contentType.getValue())) { return request.getBody() .map(byteBuffer -> { if (byteBuffer.hasArray()) { return bodyStringBuilder.append(new String(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(), byteBuffer.remaining(), StandardCharsets.UTF_8)); } else { return bodyStringBuilder.append(new String(FluxUtil.byteBufferToArray(byteBuffer.duplicate()), StandardCharsets.UTF_8)); } }) .then(Mono.fromCallable(() -> replaceTracingPlaceHolder(request, bodyStringBuilder))) .then(next.process()); } else { return next.process(); } }
byteBuffer.arrayOffset() + byteBuffer.position(), byteBuffer.remaining(),
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final HttpRequest request = context.getHttpRequest(); final HttpHeader contentType = request.getHeaders().get(Constants.CONTENT_TYPE); StringBuilder bodyStringBuilder = new StringBuilder(); if (TracerProxy.isTracingEnabled() && contentType != null && Constants.CLOUD_EVENT_CONTENT_TYPE.equals(contentType.getValue())) { return request.getBody() .map(byteBuffer -> { if (byteBuffer.hasArray()) { return bodyStringBuilder.append(new String(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(), byteBuffer.remaining(), StandardCharsets.UTF_8)); } else { return bodyStringBuilder.append(new String(FluxUtil.byteBufferToArray(byteBuffer.duplicate()), StandardCharsets.UTF_8)); } }) .then(Mono.fromCallable(() -> replaceTracingPlaceHolder(request, bodyStringBuilder))) .then(next.process()); } else { return next.process(); } }
class CloudEventTracingPipelinePolicy implements HttpPipelinePolicy { @Override /** * * @param request The {@link HttpRequest}, whose body will be mutated by replacing traceparent and tracestate * placeholders. * @param bodyStringBuilder The {@link StringBuilder} that contains the full HttpRequest body string. * @return The new body string with the place holders replaced (if header has tracing) * or removed (if header no tracing). */ static String replaceTracingPlaceHolder(HttpRequest request, StringBuilder bodyStringBuilder) { final int traceParentPlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_PARENT_PLACEHOLDER); if (traceParentPlaceHolderIndex >= 0) { final HttpHeader traceparentHeader = request.getHeaders().get(Constants.TRACE_PARENT); bodyStringBuilder.replace(traceParentPlaceHolderIndex, Constants.TRACE_PARENT_PLACEHOLDER.length() + traceParentPlaceHolderIndex, traceparentHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_PARENT, traceparentHeader.getValue()) : ""); } final int traceStatePlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_STATE_PLACEHOLDER); if (traceStatePlaceHolderIndex >= 0) { final HttpHeader tracestateHeader = request.getHeaders().get(Constants.TRACE_STATE); bodyStringBuilder.replace(traceStatePlaceHolderIndex, Constants.TRACE_STATE_PLACEHOLDER.length() + traceStatePlaceHolderIndex, tracestateHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_STATE, tracestateHeader.getValue()) : ""); } String newBodyString = bodyStringBuilder.toString(); request.setHeader(Constants.CONTENT_LENGTH, String.valueOf(newBodyString.length())); request.setBody(newBodyString); return newBodyString; } }
class CloudEventTracingPipelinePolicy implements HttpPipelinePolicy { @Override /** * * @param request The {@link HttpRequest}, whose body will be mutated by replacing traceparent and tracestate * placeholders. * @param bodyStringBuilder The {@link StringBuilder} that contains the full HttpRequest body string. * @return The new body string with the place holders replaced (if header has tracing) * or removed (if header no tracing). */ static String replaceTracingPlaceHolder(HttpRequest request, StringBuilder bodyStringBuilder) { final int traceParentPlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_PARENT_PLACEHOLDER); if (traceParentPlaceHolderIndex >= 0) { final HttpHeader traceparentHeader = request.getHeaders().get(Constants.TRACE_PARENT); bodyStringBuilder.replace(traceParentPlaceHolderIndex, Constants.TRACE_PARENT_PLACEHOLDER.length() + traceParentPlaceHolderIndex, traceparentHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_PARENT, traceparentHeader.getValue()) : ""); } final int traceStatePlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_STATE_PLACEHOLDER); if (traceStatePlaceHolderIndex >= 0) { final HttpHeader tracestateHeader = request.getHeaders().get(Constants.TRACE_STATE); bodyStringBuilder.replace(traceStatePlaceHolderIndex, Constants.TRACE_STATE_PLACEHOLDER.length() + traceStatePlaceHolderIndex, tracestateHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_STATE, tracestateHeader.getValue()) : ""); } String newBodyString = bodyStringBuilder.toString(); request.setHeader(Constants.CONTENT_LENGTH, String.valueOf(newBodyString.length())); request.setBody(newBodyString); return newBodyString; } }
nit: maybe we should move `byteBufferToArray` helper method to `CoreUtils` instead of having it in `FluxUtil`.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final HttpRequest request = context.getHttpRequest(); final HttpHeader contentType = request.getHeaders().get(Constants.CONTENT_TYPE); StringBuilder bodyStringBuilder = new StringBuilder(); if (TracerProxy.isTracingEnabled() && contentType != null && Constants.CLOUD_EVENT_CONTENT_TYPE.equals(contentType.getValue())) { return request.getBody() .map(byteBuffer -> { if (byteBuffer.hasArray()) { return bodyStringBuilder.append(new String(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(), byteBuffer.remaining(), StandardCharsets.UTF_8)); } else { return bodyStringBuilder.append(new String(FluxUtil.byteBufferToArray(byteBuffer.duplicate()), StandardCharsets.UTF_8)); } }) .then(Mono.fromCallable(() -> replaceTracingPlaceHolder(request, bodyStringBuilder))) .then(next.process()); } else { return next.process(); } }
return bodyStringBuilder.append(new String(FluxUtil.byteBufferToArray(byteBuffer.duplicate()),
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final HttpRequest request = context.getHttpRequest(); final HttpHeader contentType = request.getHeaders().get(Constants.CONTENT_TYPE); StringBuilder bodyStringBuilder = new StringBuilder(); if (TracerProxy.isTracingEnabled() && contentType != null && Constants.CLOUD_EVENT_CONTENT_TYPE.equals(contentType.getValue())) { return request.getBody() .map(byteBuffer -> { if (byteBuffer.hasArray()) { return bodyStringBuilder.append(new String(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(), byteBuffer.remaining(), StandardCharsets.UTF_8)); } else { return bodyStringBuilder.append(new String(FluxUtil.byteBufferToArray(byteBuffer.duplicate()), StandardCharsets.UTF_8)); } }) .then(Mono.fromCallable(() -> replaceTracingPlaceHolder(request, bodyStringBuilder))) .then(next.process()); } else { return next.process(); } }
class CloudEventTracingPipelinePolicy implements HttpPipelinePolicy { @Override /** * * @param request The {@link HttpRequest}, whose body will be mutated by replacing traceparent and tracestate * placeholders. * @param bodyStringBuilder The {@link StringBuilder} that contains the full HttpRequest body string. * @return The new body string with the place holders replaced (if header has tracing) * or removed (if header no tracing). */ static String replaceTracingPlaceHolder(HttpRequest request, StringBuilder bodyStringBuilder) { final int traceParentPlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_PARENT_PLACEHOLDER); if (traceParentPlaceHolderIndex >= 0) { final HttpHeader traceparentHeader = request.getHeaders().get(Constants.TRACE_PARENT); bodyStringBuilder.replace(traceParentPlaceHolderIndex, Constants.TRACE_PARENT_PLACEHOLDER.length() + traceParentPlaceHolderIndex, traceparentHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_PARENT, traceparentHeader.getValue()) : ""); } final int traceStatePlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_STATE_PLACEHOLDER); if (traceStatePlaceHolderIndex >= 0) { final HttpHeader tracestateHeader = request.getHeaders().get(Constants.TRACE_STATE); bodyStringBuilder.replace(traceStatePlaceHolderIndex, Constants.TRACE_STATE_PLACEHOLDER.length() + traceStatePlaceHolderIndex, tracestateHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_STATE, tracestateHeader.getValue()) : ""); } String newBodyString = bodyStringBuilder.toString(); request.setHeader(Constants.CONTENT_LENGTH, String.valueOf(newBodyString.length())); request.setBody(newBodyString); return newBodyString; } }
class CloudEventTracingPipelinePolicy implements HttpPipelinePolicy { @Override /** * * @param request The {@link HttpRequest}, whose body will be mutated by replacing traceparent and tracestate * placeholders. * @param bodyStringBuilder The {@link StringBuilder} that contains the full HttpRequest body string. * @return The new body string with the place holders replaced (if header has tracing) * or removed (if header no tracing). */ static String replaceTracingPlaceHolder(HttpRequest request, StringBuilder bodyStringBuilder) { final int traceParentPlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_PARENT_PLACEHOLDER); if (traceParentPlaceHolderIndex >= 0) { final HttpHeader traceparentHeader = request.getHeaders().get(Constants.TRACE_PARENT); bodyStringBuilder.replace(traceParentPlaceHolderIndex, Constants.TRACE_PARENT_PLACEHOLDER.length() + traceParentPlaceHolderIndex, traceparentHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_PARENT, traceparentHeader.getValue()) : ""); } final int traceStatePlaceHolderIndex = bodyStringBuilder.indexOf(Constants.TRACE_STATE_PLACEHOLDER); if (traceStatePlaceHolderIndex >= 0) { final HttpHeader tracestateHeader = request.getHeaders().get(Constants.TRACE_STATE); bodyStringBuilder.replace(traceStatePlaceHolderIndex, Constants.TRACE_STATE_PLACEHOLDER.length() + traceStatePlaceHolderIndex, tracestateHeader != null ? String.format(",\"%s\":\"%s\"", Constants.TRACE_STATE, tracestateHeader.getValue()) : ""); } String newBodyString = bodyStringBuilder.toString(); request.setHeader(Constants.CONTENT_LENGTH, String.valueOf(newBodyString.length())); request.setBody(newBodyString); return newBodyString; } }
A stupid question, why start from `byteBuffer.position()`? Is it common that someone sets the position? And if someone did, do we intent to read the remaining or the whole?
public String toString() { if (transactionId.hasArray()) { return new String(transactionId.array(), transactionId.arrayOffset() + transactionId.position(), transactionId.remaining(), StandardCharsets.UTF_8); } else { return new String(FluxUtil.byteBufferToArray(transactionId.duplicate()), StandardCharsets.UTF_8); } }
transactionId.remaining(), StandardCharsets.UTF_8);
public String toString() { if (transactionId.hasArray()) { return new String(transactionId.array(), transactionId.arrayOffset() + transactionId.position(), transactionId.remaining(), StandardCharsets.UTF_8); } else { return new String(FluxUtil.byteBufferToArray(transactionId.duplicate()), StandardCharsets.UTF_8); } }
class AmqpTransaction { private final ByteBuffer transactionId; /** * Creates {@link AmqpTransaction} given {@code transactionId}. * * @param transactionId The id for this transaction. * * @throws NullPointerException if {@code transactionId} is null. */ public AmqpTransaction(ByteBuffer transactionId) { this.transactionId = Objects.requireNonNull(transactionId, "'transactionId' cannot be null."); } /** * Gets the id for this transaction. * * @return The id for this transaction. */ public ByteBuffer getTransactionId() { return transactionId; } /** * String representation of the transaction id. * * @return string representation of the transaction id. */ }
class AmqpTransaction { private final ByteBuffer transactionId; /** * Creates {@link AmqpTransaction} given {@code transactionId}. * * @param transactionId The id for this transaction. * * @throws NullPointerException if {@code transactionId} is null. */ public AmqpTransaction(ByteBuffer transactionId) { this.transactionId = Objects.requireNonNull(transactionId, "'transactionId' cannot be null."); } /** * Gets the id for this transaction. * * @return The id for this transaction. */ public ByteBuffer getTransactionId() { return transactionId; } /** * String representation of the transaction id. * * @return string representation of the transaction id. */ }
Add javadoc
public EventData() { this.context = Context.NONE; this.annotatedMessage = EMPTY_MESSAGE; this.properties = annotatedMessage.getApplicationProperties(); this.systemProperties = new SystemProperties(); }
}
public EventData() { this.context = Context.NONE; this.annotatedMessage = EMPTY_MESSAGE; this.properties = annotatedMessage.getApplicationProperties(); this.systemProperties = new SystemProperties(); }
class EventData extends MessageContent { /* * These are properties owned by the service and set when a message is received. */ static final Set<String> RESERVED_SYSTEM_PROPERTIES; static final AmqpAnnotatedMessage EMPTY_MESSAGE = new AmqpAnnotatedMessage(AmqpMessageBody.fromData(new byte[0])); private static final ClientLogger LOGGER = new ClientLogger(EventData.class); private final Map<String, Object> properties; private final SystemProperties systemProperties; private AmqpAnnotatedMessage annotatedMessage; private Context context; static { final Set<String> properties = new HashSet<>(); properties.add(OFFSET_ANNOTATION_NAME.getValue()); properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue()); properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()); properties.add(PUBLISHER_ANNOTATION_NAME.getValue()); RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties); } /** * Creates an event containing the {@code body}. * * @param body The data to set for this event. * * @throws NullPointerException if {@code body} is {@code null}. */ public EventData(byte[] body) { this.context = Context.NONE; final AmqpMessageBody messageBody = AmqpMessageBody.fromData( Objects.requireNonNull(body, "'body' cannot be null.")); this.annotatedMessage = new AmqpAnnotatedMessage(messageBody); this.properties = annotatedMessage.getApplicationProperties(); this.systemProperties = new SystemProperties(); } /** * Creates an event containing the {@code body}. * * @param body The data to set for this event. * * @throws NullPointerException if {@code body} is {@code null}. */ public EventData(ByteBuffer body) { this(Objects.requireNonNull(body, "'body' cannot be null.").array()); } /** * Creates an event by encoding the {@code body} using UTF-8 charset. * * @param body The string that will be UTF-8 encoded to create an event. * * @throws NullPointerException if {@code body} is {@code null}. 
*/ public EventData(String body) { this(Objects.requireNonNull(body, "'body' cannot be null.").getBytes(UTF_8)); } /** * Creates an event with the provided {@link BinaryData} as payload. * * @param body The {@link BinaryData} payload for this event. */ public EventData(BinaryData body) { this(Objects.requireNonNull(body, "'body' cannot be null.").toBytes()); } /** * Creates an event with the given {@code body}, system properties and context. Used in the case where a message * is received from the service. * * @param context A specified key-value pair of type {@link Context}. * @param amqpAnnotatedMessage Backing annotated message. * * @throws NullPointerException if {@code amqpAnnotatedMessage} or {@code context} is {@code null}. * @throws IllegalArgumentException if {@code amqpAnnotatedMessage}'s body type is unknown. */ EventData(AmqpAnnotatedMessage amqpAnnotatedMessage, SystemProperties systemProperties, Context context) { this.context = Objects.requireNonNull(context, "'context' cannot be null."); this.properties = Collections.unmodifiableMap(amqpAnnotatedMessage.getApplicationProperties()); this.annotatedMessage = Objects.requireNonNull(amqpAnnotatedMessage, "'amqpAnnotatedMessage' cannot be null."); this.systemProperties = systemProperties; switch (annotatedMessage.getBody().getBodyType()) { case DATA: break; case SEQUENCE: case VALUE: LOGGER.warning("Message body type '{}' is not supported in EH. " + " Getting contents of body may throw.", annotatedMessage.getBody().getBodyType()); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Body type not valid " + annotatedMessage.getBody().getBodyType())); } } /** * Gets the set of free-form event properties which may be used for passing metadata associated with the event with * the event body during Event Hubs operations. 
A common use-case for {@code properties()} is to associate * serialization hints for the {@link * * <p><strong>Adding serialization hint using {@code getProperties()}</strong></p> * <p>In the sample, the type of telemetry is indicated by adding an application property with key "eventType".</p> * * <!-- src_embed com.azure.messaging.eventhubs.eventdata.getProperties --> * <pre> * TelemetryEvent telemetry = new TelemetryEvent& * byte[] serializedTelemetryData = telemetry.toString& * * EventData eventData = new EventData& * eventData.getProperties& * </pre> * <!-- end com.azure.messaging.eventhubs.eventdata.getProperties --> * * @return Application properties associated with this {@link EventData}. For received {@link EventData}, the map is * a read-only view. */ public Map<String, Object> getProperties() { return properties; } /** * Properties that are populated by Event Hubs service. As these are populated by the Event Hubs service, they are * only present on a <b>received</b> {@link EventData}. Provides an abstraction on top of properties exposed by * {@link * {@link * * @return An encapsulation of all system properties appended by EventHubs service into {@link EventData}. If the * {@link EventData} is not received from the Event Hubs service, the values returned are {@code null}. */ public Map<String, Object> getSystemProperties() { return systemProperties; } /** * Gets the actual payload/data wrapped by EventData. * * <p> * If the means for deserializing the raw data is not apparent to consumers, a common technique is to make use of * {@link * wish to deserialize the binary data. * </p> * * @return A byte array representing the data. */ public byte[] getBody() { return annotatedMessage.getBody().getFirstData(); } /** * Returns event data as UTF-8 decoded string. * * @return UTF-8 decoded string representation of the event data. 
*/ public String getBodyAsString() { return new String(annotatedMessage.getBody().getFirstData(), UTF_8); } /** * Returns the {@link BinaryData} payload associated with this event. * * @return the {@link BinaryData} payload associated with this event. */ @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromBytes(annotatedMessage.getBody().getFirstData()); } /** * {@inheritDoc} */ @Override public EventData setBodyAsBinaryData(BinaryData binaryData) { this.annotatedMessage = new AmqpAnnotatedMessage(AmqpMessageBody.fromData(binaryData.toBytes())); return this; } /** * Gets the offset of the event when it was received from the associated Event Hub partition. This is only present * on a <b>received</b> {@link EventData}. * * @return The offset within the Event Hub partition of the received event. {@code null} if the {@link EventData} * was not received from Event Hubs service. */ public Long getOffset() { return systemProperties.getOffset(); } /** * Gets the partition hashing key if it was set when originally publishing the event. If it exists, this value was * used to compute a hash to select a partition to send the message to. This is only present on a <b>received</b> * {@link EventData}. * * @return A partition key for this Event Data. {@code null} if the {@link EventData} was not received from Event * Hubs service or there was no partition key set when the event was sent to the Event Hub. */ public String getPartitionKey() { return systemProperties.getPartitionKey(); } /** * Gets the instant, in UTC, of when the event was enqueued in the Event Hub partition. This is only present on a * <b>received</b> {@link EventData}. * * @return The instant, in UTC, this was enqueued in the Event Hub partition. {@code null} if the {@link EventData} * was not received from Event Hubs service. 
*/ public Instant getEnqueuedTime() { return systemProperties.getEnqueuedTime(); } /** * Gets the sequence number assigned to the event when it was enqueued in the associated Event Hub partition. This * is unique for every message received in the Event Hub partition. This is only present on a <b>received</b> {@link * EventData}. * * @return The sequence number for this event. {@code null} if the {@link EventData} was not received from Event * Hubs service. */ public Long getSequenceNumber() { return systemProperties.getSequenceNumber(); } /** * Gets the underlying AMQP message. * * @return The underlying AMQP message. */ public AmqpAnnotatedMessage getRawAmqpMessage() { return annotatedMessage; } /** * Gets the content type. * * @return The content type. */ public String getContentType() { return annotatedMessage.getProperties().getContentType(); } /** * Sets the content type. * * @param contentType The content type. * * @return The updated {@link EventData}. */ public EventData setContentType(String contentType) { annotatedMessage.getProperties().setContentType(contentType); return this; } /** * Gets the correlation id. * * @return The correlation id. {@code null} if there is none set. */ public String getCorrelationId() { final AmqpMessageId messageId = annotatedMessage.getProperties().getCorrelationId(); return messageId != null ? messageId.toString() : null; } /** * Sets the correlation id. * * @param correlationId The correlation id. * * @return The updated {@link EventData}. */ public EventData setCorrelationId(String correlationId) { final AmqpMessageId id = correlationId != null ? new AmqpMessageId(correlationId) : null; annotatedMessage.getProperties().setCorrelationId(id); return this; } /** * Gets the message id. * * @return The message id. {@code null} if there is none set. */ public String getMessageId() { final AmqpMessageId messageId = annotatedMessage.getProperties().getMessageId(); return messageId != null ? 
messageId.toString() : null; } /** * Sets the message id. * * @param messageId The message id. * * @return The updated {@link EventData}. */ public EventData setMessageId(String messageId) { final AmqpMessageId id = messageId != null ? new AmqpMessageId(messageId) : null; annotatedMessage.getProperties().setMessageId(id); return this; } /** * {@inheritDoc} */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } EventData eventData = (EventData) o; return Arrays.equals(annotatedMessage.getBody().getFirstData(), eventData.annotatedMessage.getBody().getFirstData()); } /** * {@inheritDoc} */ @Override public int hashCode() { return Arrays.hashCode(annotatedMessage.getBody().getFirstData()); } /** * A specified key-value pair of type {@link Context} to set additional information on the event. * * @return the {@link Context} object set on the event */ Context getContext() { return context; } /** * Adds a new key value pair to the existing context on Event Data. * * @param key The key for this context object * @param value The value for this context object. * * @return The updated {@link EventData}. * * @throws NullPointerException if {@code key} or {@code value} is null. */ public EventData addContext(String key, Object value) { Objects.requireNonNull(key, "The 'key' parameter cannot be null."); Objects.requireNonNull(value, "The 'value' parameter cannot be null."); this.context = context.addData(key, value); return this; } }
class EventData extends MessageContent { /* * These are properties owned by the service and set when a message is received. */ static final Set<String> RESERVED_SYSTEM_PROPERTIES; static final AmqpAnnotatedMessage EMPTY_MESSAGE = new AmqpAnnotatedMessage(AmqpMessageBody.fromData(new byte[0])); private static final ClientLogger LOGGER = new ClientLogger(EventData.class); private final Map<String, Object> properties; private final SystemProperties systemProperties; private AmqpAnnotatedMessage annotatedMessage; private Context context; static { final Set<String> properties = new HashSet<>(); properties.add(OFFSET_ANNOTATION_NAME.getValue()); properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue()); properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()); properties.add(PUBLISHER_ANNOTATION_NAME.getValue()); RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties); } /** * Creates an event with an empty body. */ /** * Creates an event containing the {@code body}. * * @param body The data to set for this event. * * @throws NullPointerException if {@code body} is {@code null}. */ public EventData(byte[] body) { this.context = Context.NONE; final AmqpMessageBody messageBody = AmqpMessageBody.fromData( Objects.requireNonNull(body, "'body' cannot be null.")); this.annotatedMessage = new AmqpAnnotatedMessage(messageBody); this.properties = annotatedMessage.getApplicationProperties(); this.systemProperties = new SystemProperties(); } /** * Creates an event containing the {@code body}. * * @param body The data to set for this event. * * @throws NullPointerException if {@code body} is {@code null}. */ public EventData(ByteBuffer body) { this(Objects.requireNonNull(body, "'body' cannot be null.").array()); } /** * Creates an event by encoding the {@code body} using UTF-8 charset. * * @param body The string that will be UTF-8 encoded to create an event. 
* * @throws NullPointerException if {@code body} is {@code null}. */ public EventData(String body) { this(Objects.requireNonNull(body, "'body' cannot be null.").getBytes(UTF_8)); } /** * Creates an event with the provided {@link BinaryData} as payload. * * @param body The {@link BinaryData} payload for this event. */ public EventData(BinaryData body) { this(Objects.requireNonNull(body, "'body' cannot be null.").toBytes()); } /** * Creates an event with the given {@code body}, system properties and context. Used in the case where a message * is received from the service. * * @param context A specified key-value pair of type {@link Context}. * @param amqpAnnotatedMessage Backing annotated message. * * @throws NullPointerException if {@code amqpAnnotatedMessage} or {@code context} is {@code null}. * @throws IllegalArgumentException if {@code amqpAnnotatedMessage}'s body type is unknown. */ EventData(AmqpAnnotatedMessage amqpAnnotatedMessage, SystemProperties systemProperties, Context context) { this.context = Objects.requireNonNull(context, "'context' cannot be null."); this.properties = Collections.unmodifiableMap(amqpAnnotatedMessage.getApplicationProperties()); this.annotatedMessage = Objects.requireNonNull(amqpAnnotatedMessage, "'amqpAnnotatedMessage' cannot be null."); this.systemProperties = systemProperties; switch (annotatedMessage.getBody().getBodyType()) { case DATA: break; case SEQUENCE: case VALUE: LOGGER.warning("Message body type '{}' is not supported in EH. " + " Getting contents of body may throw.", annotatedMessage.getBody().getBodyType()); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Body type not valid " + annotatedMessage.getBody().getBodyType())); } } /** * Gets the set of free-form event properties which may be used for passing metadata associated with the event with * the event body during Event Hubs operations. 
A common use-case for {@code properties()} is to associate * serialization hints for the {@link * * <p><strong>Adding serialization hint using {@code getProperties()}</strong></p> * <p>In the sample, the type of telemetry is indicated by adding an application property with key "eventType".</p> * * <!-- src_embed com.azure.messaging.eventhubs.eventdata.getProperties --> * <pre> * TelemetryEvent telemetry = new TelemetryEvent& * byte[] serializedTelemetryData = telemetry.toString& * * EventData eventData = new EventData& * eventData.getProperties& * </pre> * <!-- end com.azure.messaging.eventhubs.eventdata.getProperties --> * * @return Application properties associated with this {@link EventData}. For received {@link EventData}, the map is * a read-only view. */ public Map<String, Object> getProperties() { return properties; } /** * Properties that are populated by Event Hubs service. As these are populated by the Event Hubs service, they are * only present on a <b>received</b> {@link EventData}. Provides an abstraction on top of properties exposed by * {@link * {@link * * @return An encapsulation of all system properties appended by EventHubs service into {@link EventData}. If the * {@link EventData} is not received from the Event Hubs service, the values returned are {@code null}. */ public Map<String, Object> getSystemProperties() { return systemProperties; } /** * Gets the actual payload/data wrapped by EventData. * * <p> * If the means for deserializing the raw data is not apparent to consumers, a common technique is to make use of * {@link * wish to deserialize the binary data. * </p> * * @return A byte array representing the data. */ public byte[] getBody() { return annotatedMessage.getBody().getFirstData(); } /** * Returns event data as UTF-8 decoded string. * * @return UTF-8 decoded string representation of the event data. 
*/ public String getBodyAsString() { return new String(annotatedMessage.getBody().getFirstData(), UTF_8); } /** * Returns the {@link BinaryData} payload associated with this event. * * @return the {@link BinaryData} payload associated with this event. */ @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromBytes(annotatedMessage.getBody().getFirstData()); } /** * Sets a new binary body and corresponding {@link AmqpAnnotatedMessage} on the event. Contents from * {@link */ @Override public EventData setBodyAsBinaryData(BinaryData binaryData) { final AmqpAnnotatedMessage current = this.annotatedMessage; this.annotatedMessage = new AmqpAnnotatedMessage(AmqpMessageBody.fromData(binaryData.toBytes())); if (current == null) { return this; } this.annotatedMessage.getApplicationProperties().putAll(current.getApplicationProperties()); this.annotatedMessage.getDeliveryAnnotations().putAll(current.getDeliveryAnnotations()); this.annotatedMessage.getFooter().putAll(current.getFooter()); this.annotatedMessage.getMessageAnnotations().putAll(current.getMessageAnnotations()); final AmqpMessageHeader header = this.annotatedMessage.getHeader(); header.setDeliveryCount(current.getHeader().getDeliveryCount()) .setDurable(current.getHeader().isDurable()) .setFirstAcquirer(current.getHeader().isFirstAcquirer()) .setPriority(current.getHeader().getPriority()) .setTimeToLive(current.getHeader().getTimeToLive()); final AmqpMessageProperties props = this.annotatedMessage.getProperties(); props.setAbsoluteExpiryTime(current.getProperties().getAbsoluteExpiryTime()) .setContentEncoding(current.getProperties().getContentEncoding()) .setContentType(current.getProperties().getContentType()) .setCorrelationId(current.getProperties().getCorrelationId()) .setCreationTime(current.getProperties().getCreationTime()) .setGroupId(current.getProperties().getGroupId()) .setGroupSequence(current.getProperties().getGroupSequence()) .setMessageId(current.getProperties().getMessageId()) 
.setReplyTo(current.getProperties().getReplyTo()) .setReplyToGroupId(current.getProperties().getReplyToGroupId()) .setSubject(current.getProperties().getSubject()) .setTo(current.getProperties().getTo()) .setUserId(current.getProperties().getUserId()); return this; } /** * Gets the offset of the event when it was received from the associated Event Hub partition. This is only present * on a <b>received</b> {@link EventData}. * * @return The offset within the Event Hub partition of the received event. {@code null} if the {@link EventData} * was not received from Event Hubs service. */ public Long getOffset() { return systemProperties.getOffset(); } /** * Gets the partition hashing key if it was set when originally publishing the event. If it exists, this value was * used to compute a hash to select a partition to send the message to. This is only present on a <b>received</b> * {@link EventData}. * * @return A partition key for this Event Data. {@code null} if the {@link EventData} was not received from Event * Hubs service or there was no partition key set when the event was sent to the Event Hub. */ public String getPartitionKey() { return systemProperties.getPartitionKey(); } /** * Gets the instant, in UTC, of when the event was enqueued in the Event Hub partition. This is only present on a * <b>received</b> {@link EventData}. * * @return The instant, in UTC, this was enqueued in the Event Hub partition. {@code null} if the {@link EventData} * was not received from Event Hubs service. */ public Instant getEnqueuedTime() { return systemProperties.getEnqueuedTime(); } /** * Gets the sequence number assigned to the event when it was enqueued in the associated Event Hub partition. This * is unique for every message received in the Event Hub partition. This is only present on a <b>received</b> {@link * EventData}. * * @return The sequence number for this event. {@code null} if the {@link EventData} was not received from Event * Hubs service. 
*/ public Long getSequenceNumber() { return systemProperties.getSequenceNumber(); } /** * Gets the underlying AMQP message. * * @return The underlying AMQP message. */ public AmqpAnnotatedMessage getRawAmqpMessage() { return annotatedMessage; } /** * Gets the content type. * * @return The content type. */ public String getContentType() { return annotatedMessage.getProperties().getContentType(); } /** * Sets the content type. * * @param contentType The content type. * * @return The updated {@link EventData}. */ public EventData setContentType(String contentType) { annotatedMessage.getProperties().setContentType(contentType); return this; } /** * Gets the correlation id. * * @return The correlation id. {@code null} if there is none set. */ public String getCorrelationId() { final AmqpMessageId messageId = annotatedMessage.getProperties().getCorrelationId(); return messageId != null ? messageId.toString() : null; } /** * Sets the correlation id. * * @param correlationId The correlation id. * * @return The updated {@link EventData}. */ public EventData setCorrelationId(String correlationId) { final AmqpMessageId id = correlationId != null ? new AmqpMessageId(correlationId) : null; annotatedMessage.getProperties().setCorrelationId(id); return this; } /** * Gets the message id. * * @return The message id. {@code null} if there is none set. */ public String getMessageId() { final AmqpMessageId messageId = annotatedMessage.getProperties().getMessageId(); return messageId != null ? messageId.toString() : null; } /** * Sets the message id. * * @param messageId The message id. * * @return The updated {@link EventData}. */ public EventData setMessageId(String messageId) { final AmqpMessageId id = messageId != null ? 
new AmqpMessageId(messageId) : null; annotatedMessage.getProperties().setMessageId(id); return this; } /** * {@inheritDoc} */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } EventData eventData = (EventData) o; return Arrays.equals(annotatedMessage.getBody().getFirstData(), eventData.annotatedMessage.getBody().getFirstData()); } /** * {@inheritDoc} */ @Override public int hashCode() { return Arrays.hashCode(annotatedMessage.getBody().getFirstData()); } /** * A specified key-value pair of type {@link Context} to set additional information on the event. * * @return the {@link Context} object set on the event */ Context getContext() { return context; } /** * Adds a new key value pair to the existing context on Event Data. * * @param key The key for this context object * @param value The value for this context object. * * @return The updated {@link EventData}. * * @throws NullPointerException if {@code key} or {@code value} is null. */ public EventData addContext(String key, Object value) { Objects.requireNonNull(key, "The 'key' parameter cannot be null."); Objects.requireNonNull(value, "The 'value' parameter cannot be null."); this.context = context.addData(key, value); return this; } }
Would the time required to "delete" the file after each download count towards the perf test op/s metrics?
public Mono<Void> runAsync() { File file = new File(tempDir, UUID.randomUUID().toString()); return blobAsyncClient.downloadToFile(file.getAbsolutePath()) .doFinally(ignored -> { if (!file.delete()){ throw new IllegalStateException("Unable to delete test file"); } }) .then(); }
if (!file.delete()){
public Mono<Void> runAsync() { File file = new File(tempDir, UUID.randomUUID().toString()); return blobAsyncClient.downloadToFile(file.getAbsolutePath()) .doFinally(ignored -> { if (!file.delete()){ throw new IllegalStateException("Unable to delete test file"); } }) .then(); }
class DownloadBlobToFileTest extends ContainerTest<PerfStressOptions> { private final BlobClient blobClient; private final BlobAsyncClient blobAsyncClient; private final File tempDir; public DownloadBlobToFileTest(PerfStressOptions options) { super(options); String blobName = "downloadToFileTest"; blobClient = blobContainerClient.getBlobClient(blobName); blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName); try { tempDir = Files.createTempDirectory("downloadToFileTest").toFile(); tempDir.deleteOnExit(); } catch (IOException e) { throw new UncheckedIOException(e); } } public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(blobAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null)) .then(); } @Override public void run() { File file = new File(tempDir, UUID.randomUUID().toString()); try { blobClient.downloadToFile(file.getAbsolutePath()); } finally { if (!file.delete()){ throw new IllegalStateException("Unable to delete test file"); } } } @Override }
class DownloadBlobToFileTest extends ContainerTest<PerfStressOptions> { private final BlobClient blobClient; private final BlobAsyncClient blobAsyncClient; private final File tempDir; public DownloadBlobToFileTest(PerfStressOptions options) { super(options); String blobName = "downloadToFileTest"; blobClient = blobContainerClient.getBlobClient(blobName); blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName); try { tempDir = Files.createTempDirectory("downloadToFileTest").toFile(); tempDir.deleteOnExit(); } catch (IOException e) { throw new UncheckedIOException(e); } } public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(blobAsyncClient.upload(createRandomByteBufferFlux(options.getSize()), null)) .then(); } @Override public void run() { File file = new File(tempDir, UUID.randomUUID().toString()); try { blobClient.downloadToFile(file.getAbsolutePath()); } finally { if (!file.delete()){ throw new IllegalStateException("Unable to delete test file"); } } } @Override }
NIT: shouldn't that extra space be reverted?
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = new ConcurrentHashMap<>(); if (responseHeaders != null) { for (Map.Entry<String, String> entry : responseHeaders.entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { this.responseHeaders.put(entry.getKey(), entry.getValue()); } } } }
for (Map.Entry<String, String> entry : responseHeaders.entrySet()) {
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = new ConcurrentHashMap<>(); if (responseHeaders != null) { for (Map.Entry<String, String> entry: responseHeaders.entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { this.responseHeaders.put(entry.getKey(), entry.getValue()); } } } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = new ObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); /** * Status code */ private final int statusCode; /** * Response headers */ private final Map<String, String> responseHeaders; /** * Cosmos diagnostics */ private CosmosDiagnostics cosmosDiagnostics; /** * Request timeline */ private RequestTimeline requestTimeline; /** * Channel acquisition timeline */ private RntbdChannelAcquisitionTimeline channelAcquisitionTimeline; /** * Cosmos error */ private CosmosError cosmosError; /** * RNTBD channel task queue size */ private int rntbdChannelTaskQueueSize; /** * RNTBD endpoint statistics */ private RntbdEndpointStatistics rntbdEndpointStatistics; /** * LSN */ long lsn; /** * Partition key range ID */ String partitionKeyRangeId; /** * Request headers */ Map<String, String> requestHeaders; /** * Request URI */ Uri requestUri; /** * Resource address */ String resourceAddress; /** * Request payload length */ private int requestPayloadLength; /** * RNTBD pending request queue size */ private int rntbdPendingRequestQueueSize; /** * RNTBD request length */ private int rntbdRequestLength; /** * RNTBD response length */ private int rntbdResponseLength; /** * Sending request has started */ private boolean sendingRequestHasStarted; /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param message the string message. * @param responseHeaders the response headers. * @param cause the inner exception */ /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. 
*/ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. 
* @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. */ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. 
*/ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. */ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. 
*/ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. */ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.parseDouble(value); } @Override public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", 
requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } } String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } RntbdChannelAcquisitionTimeline getChannelAcquisitionTimeline() { return this.channelAcquisitionTimeline; } void setChannelAcquisitionTimeline(RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { this.channelAcquisitionTimeline = channelAcquisitionTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; 
} void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } static { ImplementationBridgeHelpers.CosmosExceptionHelper.setCosmosExceptionAccessor( new ImplementationBridgeHelpers.CosmosExceptionHelper.CosmosExceptionAccessor() { @Override public CosmosException createCosmosException(int statusCode, Exception innerException) { return new CosmosException(statusCode, innerException); } }); } }
class CosmosException extends AzureException { private static final long MAX_RETRY_AFTER_IN_MS = BatchExecUtils.MAX_RETRY_AFTER_IN_MS; private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = new ObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); /** * Status code */ private final int statusCode; /** * Response headers */ private final Map<String, String> responseHeaders; /** * Cosmos diagnostics */ private CosmosDiagnostics cosmosDiagnostics; /** * Request timeline */ private RequestTimeline requestTimeline; /** * Channel acquisition timeline */ private RntbdChannelAcquisitionTimeline channelAcquisitionTimeline; /** * Cosmos error */ private CosmosError cosmosError; /** * RNTBD channel task queue size */ private int rntbdChannelTaskQueueSize; /** * RNTBD endpoint statistics */ private RntbdEndpointStatistics rntbdEndpointStatistics; /** * LSN */ long lsn; /** * Partition key range ID */ String partitionKeyRangeId; /** * Request headers */ Map<String, String> requestHeaders; /** * Request URI */ Uri requestUri; /** * Resource address */ String resourceAddress; /** * Request payload length */ private int requestPayloadLength; /** * RNTBD pending request queue size */ private int rntbdPendingRequestQueueSize; /** * RNTBD request length */ private int rntbdRequestLength; /** * RNTBD response length */ private int rntbdResponseLength; /** * Sending request has started */ private boolean sendingRequestHasStarted; /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param message the string message. * @param responseHeaders the response headers. * @param cause the inner exception */ /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. 
*/ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. 
* @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. */ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. 
*/ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. */ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Math.min(Long.parseLong(header), MAX_RETRY_AFTER_IN_MS); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. 
*/ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. */ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.parseDouble(value); } @Override public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", 
requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } } String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } RntbdChannelAcquisitionTimeline getChannelAcquisitionTimeline() { return this.channelAcquisitionTimeline; } void setChannelAcquisitionTimeline(RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { this.channelAcquisitionTimeline = channelAcquisitionTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; 
} void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } static { ImplementationBridgeHelpers.CosmosExceptionHelper.setCosmosExceptionAccessor( new ImplementationBridgeHelpers.CosmosExceptionHelper.CosmosExceptionAccessor() { @Override public CosmosException createCosmosException(int statusCode, Exception innerException) { return new CosmosException(statusCode, innerException); } }); } }
This is different behavior from the constructor that takes a string input. I guess the constructor throws an exception and we should have the same behavior here.
static DateTimeRfc1123 fromString(final String date) { if (CoreUtils.isNullOrEmpty(date)) { return null; } return new DateTimeRfc1123(date); }
}
static DateTimeRfc1123 fromString(final String date) { if (CoreUtils.isNullOrEmpty(date)) { return null; } return new DateTimeRfc1123(date); }
class DateTimeRfc1123 { private static final ClientLogger LOGGER = new ClientLogger(DateTimeRfc1123.class); /** * The actual datetime object. */ private final OffsetDateTime dateTime; /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param dateTime The DateTime object to wrap. */ public DateTimeRfc1123(OffsetDateTime dateTime) { this.dateTime = dateTime; } /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param formattedString The datetime string in RFC1123 format */ public DateTimeRfc1123(String formattedString) { this.dateTime = parse(formattedString); } /** * Returns the underlying DateTime. * @return The underlying DateTime. */ public OffsetDateTime getDateTime() { return this.dateTime; } /** * JSON creator for DateTimeRfc1123. * <p> * If {@code date} is null or an empty string null will be returned. * * @param date RFC1123 datetime string. * @return The DateTimeRfc1123 representation of the datetime string, or null if {@code date} is null or empty. */ @JsonCreator /** * Parses the RFC1123 format datetime string into OffsetDateTime. * * @param date The datetime string in RFC1123 format * @return The underlying OffsetDateTime. * * @throws DateTimeException If the processing character is not a digit character. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. 
*/ private static OffsetDateTime parse(final String date) { try { return OffsetDateTime.of( parseInt(date, 12, 16), parseMonth(date, 8), parseInt(date, 5, 7), parseInt(date, 17, 19), parseInt(date, 20, 22), parseInt(date, 23, 25), 0, ZoneOffset.UTC); } catch (DateTimeException | IllegalArgumentException | IndexOutOfBoundsException e) { return OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } } /** * Parses the specified substring of datetime to a 'int' value. * * @param date The datetime string in RFC1123 format. * @param beginIndex The beginning index, inclusive. * @param endIndex The ending index, exclusive. * @return The specified substring. * * @throws DateTimeException If the processing character is not digit character. */ private static int parseInt(final CharSequence date, final int beginIndex, final int endIndex) { int num = 0; for (int i = beginIndex; i < endIndex; i++) { final char c = date.charAt(i); if (c < '0' || c > '9') { throw LOGGER.logExceptionAsError(new DateTimeException("Invalid date time: " + date)); } num = num * 10 + (c - '0'); } return num; } /** * Parses the specified month substring of date time to a number value, '1' represents the month of January, * '12' represents the month of December. * * @param date The date time string in RFC1123 format. * @param beginIndex The beginning index, inclusive, to the * @return The number value which represents the month of year. '1' represents the month of January, * '12' represents the month of December. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. 
*/ private static int parseMonth(final CharSequence date, final int beginIndex) { switch (date.charAt(beginIndex)) { case 'J': switch (date.charAt(beginIndex + 1)) { case 'a': return 1; case 'u': switch (date.charAt(beginIndex + 2)) { case 'n': return 6; case 'l': return 7; default: throw LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown month " + date)); } default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'F': return 2; case 'M': switch (date.charAt(beginIndex + 2)) { case 'r': return 3; case 'y': return 5; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'A': switch (date.charAt(beginIndex + 2)) { case 'r': return 4; case 'g': return 8; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'S': return 9; case 'O': return 10; case 'N': return 11; case 'D': return 12; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } } /** * Convert the {@link OffsetDateTime dateTime} to date time string in RFC1123 format. * * @param dateTime The date time in OffsetDateTime format. * @return The date time string in RFC1123 format. 
* @throws IllegalArgumentException If {@link OffsetDateTime * {@link OffsetDateTime */ public static String toRfc1123String(OffsetDateTime dateTime) { dateTime = dateTime.withOffsetSameInstant(ZoneOffset.UTC); StringBuilder sb = new StringBuilder(32); final DayOfWeek dayOfWeek = dateTime.getDayOfWeek(); switch (dayOfWeek) { case MONDAY: sb.append("Mon, "); break; case TUESDAY: sb.append("Tue, "); break; case WEDNESDAY: sb.append("Wed, "); break; case THURSDAY: sb.append("Thu, "); break; case FRIDAY: sb.append("Fri, "); break; case SATURDAY: sb.append("Sat, "); break; case SUNDAY: sb.append("Sun, "); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown day of week " + dayOfWeek)); } zeroPad(dateTime.getDayOfMonth(), sb); final Month month = dateTime.getMonth(); switch (month) { case JANUARY: sb.append(" Jan "); break; case FEBRUARY: sb.append(" Feb "); break; case MARCH: sb.append(" Mar "); break; case APRIL: sb.append(" Apr "); break; case MAY: sb.append(" May "); break; case JUNE: sb.append(" Jun "); break; case JULY: sb.append(" Jul "); break; case AUGUST: sb.append(" Aug "); break; case SEPTEMBER: sb.append(" Sep "); break; case OCTOBER: sb.append(" Oct "); break; case NOVEMBER: sb.append(" Nov "); break; case DECEMBER: sb.append(" Dec "); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + month)); } sb.append(dateTime.getYear()); sb.append(" "); zeroPad(dateTime.getHour(), sb); sb.append(":"); zeroPad(dateTime.getMinute(), sb); sb.append(":"); zeroPad(dateTime.getSecond(), sb); sb.append(" GMT"); return sb.toString(); } private static void zeroPad(int value, StringBuilder sb) { if (value < 10) { sb.append("0"); } sb.append(value); } @Override public String toString() { return toRfc1123String(this.dateTime); } @Override public int hashCode() { return this.dateTime.hashCode(); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (!(obj instanceof 
DateTimeRfc1123)) { return false; } DateTimeRfc1123 rhs = (DateTimeRfc1123) obj; return this.dateTime.equals(rhs.getDateTime()); } }
class DateTimeRfc1123 { private static final ClientLogger LOGGER = new ClientLogger(DateTimeRfc1123.class); /** * The actual datetime object. */ private final OffsetDateTime dateTime; /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param dateTime The DateTime object to wrap. */ public DateTimeRfc1123(OffsetDateTime dateTime) { this.dateTime = dateTime; } /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param formattedString The datetime string in RFC1123 format */ public DateTimeRfc1123(String formattedString) { this.dateTime = parse(formattedString); } /** * Returns the underlying DateTime. * @return The underlying DateTime. */ public OffsetDateTime getDateTime() { return this.dateTime; } /** * JSON creator for DateTimeRfc1123. * <p> * If {@code date} is null or an empty string null will be returned. * * @param date RFC1123 datetime string. * @return The DateTimeRfc1123 representation of the datetime string, or null if {@code date} is null or empty. */ @JsonCreator /** * Parses the RFC1123 format datetime string into OffsetDateTime. * * @param date The datetime string in RFC1123 format * @return The underlying OffsetDateTime. * * @throws DateTimeException If the processing character is not a digit character. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. 
*/ private static OffsetDateTime parse(final String date) { try { return OffsetDateTime.of( parseInt(date, 12, 16), parseMonth(date, 8), parseInt(date, 5, 7), parseInt(date, 17, 19), parseInt(date, 20, 22), parseInt(date, 23, 25), 0, ZoneOffset.UTC); } catch (DateTimeException | IllegalArgumentException | IndexOutOfBoundsException e) { return OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } } /** * Parses the specified substring of datetime to a 'int' value. * * @param date The datetime string in RFC1123 format. * @param beginIndex The beginning index, inclusive. * @param endIndex The ending index, exclusive. * @return The specified substring. * * @throws DateTimeException If the processing character is not digit character. */ private static int parseInt(final CharSequence date, final int beginIndex, final int endIndex) { int num = 0; for (int i = beginIndex; i < endIndex; i++) { final char c = date.charAt(i); if (c < '0' || c > '9') { throw LOGGER.logExceptionAsError(new DateTimeException("Invalid date time: " + date)); } num = num * 10 + (c - '0'); } return num; } /** * Parses the specified month substring of date time to a number value, '1' represents the month of January, * '12' represents the month of December. * * @param date The date time string in RFC1123 format. * @param beginIndex The beginning index, inclusive, to the * @return The number value which represents the month of year. '1' represents the month of January, * '12' represents the month of December. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. 
*/ private static int parseMonth(final CharSequence date, final int beginIndex) { switch (date.charAt(beginIndex)) { case 'J': switch (date.charAt(beginIndex + 1)) { case 'a': return 1; case 'u': switch (date.charAt(beginIndex + 2)) { case 'n': return 6; case 'l': return 7; default: throw LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown month " + date)); } default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'F': return 2; case 'M': switch (date.charAt(beginIndex + 2)) { case 'r': return 3; case 'y': return 5; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'A': switch (date.charAt(beginIndex + 2)) { case 'r': return 4; case 'g': return 8; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'S': return 9; case 'O': return 10; case 'N': return 11; case 'D': return 12; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } } /** * Convert the {@link OffsetDateTime dateTime} to date time string in RFC1123 format. * * @param dateTime The date time in OffsetDateTime format. * @return The date time string in RFC1123 format. 
* @throws IllegalArgumentException If {@link OffsetDateTime * {@link OffsetDateTime */ public static String toRfc1123String(OffsetDateTime dateTime) { dateTime = dateTime.withOffsetSameInstant(ZoneOffset.UTC); StringBuilder sb = new StringBuilder(32); final DayOfWeek dayOfWeek = dateTime.getDayOfWeek(); switch (dayOfWeek) { case MONDAY: sb.append("Mon, "); break; case TUESDAY: sb.append("Tue, "); break; case WEDNESDAY: sb.append("Wed, "); break; case THURSDAY: sb.append("Thu, "); break; case FRIDAY: sb.append("Fri, "); break; case SATURDAY: sb.append("Sat, "); break; case SUNDAY: sb.append("Sun, "); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown day of week " + dayOfWeek)); } zeroPad(dateTime.getDayOfMonth(), sb); final Month month = dateTime.getMonth(); switch (month) { case JANUARY: sb.append(" Jan "); break; case FEBRUARY: sb.append(" Feb "); break; case MARCH: sb.append(" Mar "); break; case APRIL: sb.append(" Apr "); break; case MAY: sb.append(" May "); break; case JUNE: sb.append(" Jun "); break; case JULY: sb.append(" Jul "); break; case AUGUST: sb.append(" Aug "); break; case SEPTEMBER: sb.append(" Sep "); break; case OCTOBER: sb.append(" Oct "); break; case NOVEMBER: sb.append(" Nov "); break; case DECEMBER: sb.append(" Dec "); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + month)); } sb.append(dateTime.getYear()); sb.append(" "); zeroPad(dateTime.getHour(), sb); sb.append(":"); zeroPad(dateTime.getMinute(), sb); sb.append(":"); zeroPad(dateTime.getSecond(), sb); sb.append(" GMT"); return sb.toString(); } private static void zeroPad(int value, StringBuilder sb) { if (value < 10) { sb.append("0"); } sb.append(value); } @Override public String toString() { return toRfc1123String(this.dateTime); } @Override public int hashCode() { return this.dateTime.hashCode(); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (!(obj instanceof 
DateTimeRfc1123)) { return false; } DateTimeRfc1123 rhs = (DateTimeRfc1123) obj; return this.dateTime.equals(rhs.getDateTime()); } }
Is it possible to reach this catch block before `retain`ing the `ByteBuf`?
public void onNext(ByteBuf bytes) { try { if (isWriting) { onError(new IllegalStateException("Received onNext while processing another write operation.")); } else { bytes = bytes.retain(); write(bytes, bytes.nioBuffer()); } } catch (Throwable throwable) { bytes.release(); onError(throwable); } }
bytes.release();
public void onNext(ByteBuf bytes) { try { bytes = bytes.retain(); if (isWriting) { onError(new IllegalStateException("Received onNext while processing another write operation.")); } else { write(bytes, bytes.nioBuffer()); } } catch (Throwable throwable) { bytes.release(); onError(throwable); } }
class NettyFileWriteSubscriber implements Subscriber<ByteBuf> { private volatile boolean isWriting = false; private volatile boolean isCompleted = false; private static final ClientLogger LOGGER = new ClientLogger(NettyFileWriteSubscriber.class); private final AsynchronousFileChannel fileChannel; private final AtomicLong position; private final MonoSink<Void> emitter; private Subscription subscription; public NettyFileWriteSubscriber(AsynchronousFileChannel fileChannel, long position, MonoSink<Void> emitter) { this.fileChannel = fileChannel; this.position = new AtomicLong(position); this.emitter = emitter; } @Override public void onSubscribe(Subscription s) { if (Operators.validate(this.subscription, s)) { subscription = s; s.request(1); } } @Override private void write(ByteBuf nettyBytes, ByteBuffer nioBytes) { isWriting = true; fileChannel.write(nioBytes, position.get(), nettyBytes, new CompletionHandler<Integer, ByteBuf>() { @Override public void completed(Integer result, ByteBuf attachment) { position.addAndGet(result); if (nioBytes.hasRemaining()) { write(nettyBytes, nioBytes); } else { nettyBytes.release(); isWriting = false; if (isCompleted) { emitter.success(); } else { subscription.request(1); } } } @Override public void failed(Throwable exc, ByteBuf attachment) { attachment.release(); onError(exc); } }); } @Override public void onError(Throwable throwable) { isWriting = false; subscription.cancel(); emitter.error(LOGGER.logThrowableAsError(throwable)); } @Override public void onComplete() { isCompleted = true; if (!isWriting) { emitter.success(); } } }
class NettyFileWriteSubscriber implements Subscriber<ByteBuf> { private volatile boolean isWriting = false; private volatile boolean isCompleted = false; private static final ClientLogger LOGGER = new ClientLogger(NettyFileWriteSubscriber.class); private final AsynchronousFileChannel fileChannel; private final AtomicLong position; private final MonoSink<Void> emitter; private Subscription subscription; public NettyFileWriteSubscriber(AsynchronousFileChannel fileChannel, long position, MonoSink<Void> emitter) { this.fileChannel = fileChannel; this.position = new AtomicLong(position); this.emitter = emitter; } @Override public void onSubscribe(Subscription s) { if (Operators.validate(this.subscription, s)) { subscription = s; s.request(1); } } @Override private void write(ByteBuf nettyBytes, ByteBuffer nioBytes) { isWriting = true; fileChannel.write(nioBytes, position.get(), nettyBytes, new CompletionHandler<Integer, ByteBuf>() { @Override public void completed(Integer result, ByteBuf attachment) { position.addAndGet(result); if (nioBytes.hasRemaining()) { write(nettyBytes, nioBytes); } else { nettyBytes.release(); isWriting = false; if (isCompleted) { emitter.success(); } else { subscription.request(1); } } } @Override public void failed(Throwable exc, ByteBuf attachment) { attachment.release(); onError(exc); } }); } @Override public void onError(Throwable throwable) { isWriting = false; subscription.cancel(); emitter.error(LOGGER.logThrowableAsError(throwable)); } @Override public void onComplete() { isCompleted = true; if (!isWriting) { emitter.success(); } } }
is `serviceRegistry.id()` guaranteed not null?
public boolean hasServiceRegistryBinding() { Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return false; } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry == null) { return false; } return addonConfigs.get(Constants.SERVICE_REGISTRY_KEY) != null && serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID)); }
&& serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID));
public boolean hasServiceRegistryBinding() { Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return false; } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry == null) { return false; } return addonConfigs.get(Constants.SERVICE_REGISTRY_KEY) != null && serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID)); }
class SpringAppImpl extends ExternalChildResourceImpl<SpringApp, AppResourceInner, SpringServiceImpl, SpringService> implements SpringApp, SpringApp.Definition, SpringApp.Update { private Creatable<SpringAppDeployment> springAppDeploymentToCreate = null; private final SpringAppDeploymentsImpl deployments = new SpringAppDeploymentsImpl(this); private final SpringAppServiceBindingsImpl serviceBindings = new SpringAppServiceBindingsImpl(this); private final SpringAppDomainsImpl domains = new SpringAppDomainsImpl(this); private FunctionalTaskItem setActiveDeploymentTask = null; SpringAppImpl(String name, SpringServiceImpl parent, AppResourceInner innerObject) { super(name, parent, innerObject); } @Override public boolean isPublic() { if (innerModel().properties() == null) { return false; } return innerModel().properties().publicProperty(); } @Override public boolean isHttpsOnly() { if (innerModel().properties() == null) { return false; } return innerModel().properties().httpsOnly(); } @Override public String url() { if (innerModel().properties() == null) { return null; } return innerModel().properties().url(); } @Override public String fqdn() { if (innerModel().properties() == null) { return null; } return innerModel().properties().fqdn(); } @Override public TemporaryDisk temporaryDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().temporaryDisk(); } @Override public PersistentDisk persistentDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().persistentDisk(); } @Override public ManagedIdentityProperties identity() { return innerModel().identity(); } @Override public String activeDeploymentName() { Optional<SpringAppDeployment> deployment = deployments.list().stream().filter(SpringAppDeployment::isActive).findFirst(); return deployment.map(SpringAppDeployment::appName).orElse(null); } @Override public SpringAppDeployment getActiveDeployment() { return 
getActiveDeploymentAsync().block(); } @Override public Mono<SpringAppDeployment> getActiveDeploymentAsync() { return deployments.listAsync().filter(SpringAppDeployment::isActive).singleOrEmpty(); } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithCreate<T>> SpringAppDeployments<T> deployments() { return (SpringAppDeployments<T>) deployments; } @Override public SpringAppServiceBindings serviceBindings() { return serviceBindings; } @Override public SpringAppDomains customDomains() { return domains; } @Override public Mono<ResourceUploadDefinition> getResourceUploadUrlAsync() { return manager().serviceClient().getApps().getResourceUploadUrlAsync( parent().resourceGroupName(), parent().name(), name()); } @Override public ResourceUploadDefinition getResourceUploadUrl() { return getResourceUploadUrlAsync().block(); } private void ensureProperty() { if (innerModel().properties() == null) { innerModel().withProperties(new AppResourceProperties()); } } @Override public boolean hasConfigurationServiceBinding() { Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return false; } SpringConfigurationService configurationService = parent().getDefaultConfigurationService(); if (configurationService == null) { return false; } return addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY) != null && configurationService.id().equalsIgnoreCase((String) addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY).get(Constants.BINDING_RESOURCE_ID)); } @Override @Override public SpringAppImpl withDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(true); return this; } @Override public SpringAppImpl withoutDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(false); return this; } @Override public SpringAppImpl withCustomDomain(String domain) { domains.prepareCreateOrUpdate(domain, 
new CustomDomainProperties()); return this; } @Override public SpringAppImpl withCustomDomain(String domain, String certThumbprint) { domains.prepareCreateOrUpdate(domain, new CustomDomainProperties().withThumbprint(certThumbprint)); return this; } @Override public Update withoutCustomDomain(String domain) { domains.prepareDelete(domain); return this; } @Override public SpringAppImpl withHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(true); return this; } @Override public SpringAppImpl withoutHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(false); return this; } @Override public SpringAppImpl withTemporaryDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withTemporaryDisk( new TemporaryDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withPersistentDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withPersistentDisk( new PersistentDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withActiveDeployment(String name) { if (CoreUtils.isNullOrEmpty(name)) { return this; } this.setActiveDeploymentTask = context -> manager().serviceClient().getApps() .setActiveDeploymentsAsync(parent().resourceGroupName(), parent().name(), name(), new ActiveDeploymentCollection().withActiveDeploymentNames(Arrays.asList(name))) .then(context.voidMono()); return this; } @Override public void beforeGroupCreateOrUpdate() { if (setActiveDeploymentTask != null) { this.addPostRunDependent(setActiveDeploymentTask); } setActiveDeploymentTask = null; } @Override public Mono<SpringApp> createResourceAsync() { if (springAppDeploymentToCreate == null) { withDefaultActiveDeployment(); } return manager().serviceClient().getApps().createOrUpdateAsync( parent().resourceGroupName(), parent().name(), name(), new AppResourceInner()) .thenMany(springAppDeploymentToCreate.createAsync()) 
.then(updateResourceAsync()); } @Override public Mono<SpringApp> updateResourceAsync() { return manager().serviceClient().getApps().updateAsync( parent().resourceGroupName(), parent().name(), name(), innerModel()) .map(inner -> { setInner(inner); return this; }); } @Override public Mono<Void> deleteResourceAsync() { return manager().serviceClient().getApps().deleteAsync(parent().resourceGroupName(), parent().name(), name()); } @Override protected Mono<AppResourceInner> getInnerAsync() { return manager().serviceClient().getApps().getAsync(parent().resourceGroupName(), parent().name(), name()); } @Override public String id() { return innerModel().id(); } @Override public SpringAppImpl update() { prepareUpdate(); return this; } public AppPlatformManager manager() { return parent().manager(); } @Override public SpringAppImpl withServiceBinding(String name, BindingResourceProperties bindingProperties) { serviceBindings.prepareCreateOrUpdate(name, bindingProperties); return this; } @Override public SpringAppImpl withoutServiceBinding(String name) { serviceBindings.prepareDelete(name); return this; } @Override public SpringAppImpl withDefaultActiveDeployment() { String defaultDeploymentName = "default"; withActiveDeployment(defaultDeploymentName); springAppDeploymentToCreate = deployments().define(defaultDeploymentName) .withExistingSource(UserSourceType.JAR, String.format("<%s>", defaultDeploymentName)); return this; } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithAttach<? 
extends SpringApp.DefinitionStages.WithCreate, T>> SpringAppDeployment.DefinitionStages.Blank<T> defineActiveDeployment(String name) { return (SpringAppDeployment.DefinitionStages.Blank<T>) deployments.define(name); } SpringAppImpl addActiveDeployment(SpringAppDeploymentImpl deployment) { withActiveDeployment(deployment.name()); springAppDeploymentToCreate = deployment; return this; } @Override public SpringAppImpl withConfigurationServiceBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringConfigurationService configurationService = parent().getDefaultConfigurationService(); if (configurationService != null) { Map<String, Object> configurationServiceConfigs = addonConfigs.computeIfAbsent(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY, k -> new HashMap<>()); configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, configurationService.id()); } return this; } @Override public SpringAppImpl withoutConfigurationServiceBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } @Override public SpringAppImpl withServiceRegistryBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry != null) { Map<String, 
Object> serviceRegistryConfigs = addonConfigs.computeIfAbsent(Constants.SERVICE_REGISTRY_KEY, k -> new HashMap<>()); serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, serviceRegistry.id()); } return this; } @Override public SpringAppImpl withoutServiceRegistryBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } }
class SpringAppImpl extends ExternalChildResourceImpl<SpringApp, AppResourceInner, SpringServiceImpl, SpringService> implements SpringApp, SpringApp.Definition, SpringApp.Update { private Creatable<SpringAppDeployment> springAppDeploymentToCreate = null; private final SpringAppDeploymentsImpl deployments = new SpringAppDeploymentsImpl(this); private final SpringAppServiceBindingsImpl serviceBindings = new SpringAppServiceBindingsImpl(this); private final SpringAppDomainsImpl domains = new SpringAppDomainsImpl(this); private FunctionalTaskItem setActiveDeploymentTask = null; SpringAppImpl(String name, SpringServiceImpl parent, AppResourceInner innerObject) { super(name, parent, innerObject); } @Override public boolean isPublic() { if (innerModel().properties() == null) { return false; } return innerModel().properties().publicProperty(); } @Override public boolean isHttpsOnly() { if (innerModel().properties() == null) { return false; } return innerModel().properties().httpsOnly(); } @Override public String url() { if (innerModel().properties() == null) { return null; } return innerModel().properties().url(); } @Override public String fqdn() { if (innerModel().properties() == null) { return null; } return innerModel().properties().fqdn(); } @Override public TemporaryDisk temporaryDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().temporaryDisk(); } @Override public PersistentDisk persistentDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().persistentDisk(); } @Override public ManagedIdentityProperties identity() { return innerModel().identity(); } @Override public String activeDeploymentName() { Optional<SpringAppDeployment> deployment = deployments.list().stream().filter(SpringAppDeployment::isActive).findFirst(); return deployment.map(SpringAppDeployment::appName).orElse(null); } @Override public SpringAppDeployment getActiveDeployment() { return 
getActiveDeploymentAsync().block(); } @Override public Mono<SpringAppDeployment> getActiveDeploymentAsync() { return deployments.listAsync().filter(SpringAppDeployment::isActive).singleOrEmpty(); } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithCreate<T>> SpringAppDeployments<T> deployments() { return (SpringAppDeployments<T>) deployments; } @Override public SpringAppServiceBindings serviceBindings() { return serviceBindings; } @Override public SpringAppDomains customDomains() { return domains; } @Override public Mono<ResourceUploadDefinition> getResourceUploadUrlAsync() { return manager().serviceClient().getApps().getResourceUploadUrlAsync( parent().resourceGroupName(), parent().name(), name()); } @Override public ResourceUploadDefinition getResourceUploadUrl() { return getResourceUploadUrlAsync().block(); } private void ensureProperty() { if (innerModel().properties() == null) { innerModel().withProperties(new AppResourceProperties()); } } @Override public boolean hasConfigurationServiceBinding() { Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return false; } SpringConfigurationService configurationService = parent().getDefaultConfigurationService(); if (configurationService == null) { return false; } return addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY) != null && configurationService.id().equalsIgnoreCase((String) addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY).get(Constants.BINDING_RESOURCE_ID)); } @Override @Override public SpringAppImpl withDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(true); return this; } @Override public SpringAppImpl withoutDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(false); return this; } @Override public SpringAppImpl withCustomDomain(String domain) { domains.prepareCreateOrUpdate(domain, 
new CustomDomainProperties()); return this; } @Override public SpringAppImpl withCustomDomain(String domain, String certThumbprint) { domains.prepareCreateOrUpdate(domain, new CustomDomainProperties().withThumbprint(certThumbprint)); return this; } @Override public Update withoutCustomDomain(String domain) { domains.prepareDelete(domain); return this; } @Override public SpringAppImpl withHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(true); return this; } @Override public SpringAppImpl withoutHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(false); return this; } @Override public SpringAppImpl withTemporaryDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withTemporaryDisk( new TemporaryDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withPersistentDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withPersistentDisk( new PersistentDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withActiveDeployment(String name) { if (CoreUtils.isNullOrEmpty(name)) { return this; } this.setActiveDeploymentTask = context -> manager().serviceClient().getApps() .setActiveDeploymentsAsync(parent().resourceGroupName(), parent().name(), name(), new ActiveDeploymentCollection().withActiveDeploymentNames(Arrays.asList(name))) .then(context.voidMono()); return this; } @Override public void beforeGroupCreateOrUpdate() { if (setActiveDeploymentTask != null) { this.addPostRunDependent(setActiveDeploymentTask); } setActiveDeploymentTask = null; } @Override public Mono<SpringApp> createResourceAsync() { if (springAppDeploymentToCreate == null) { withDefaultActiveDeployment(); } return manager().serviceClient().getApps().createOrUpdateAsync( parent().resourceGroupName(), parent().name(), name(), new AppResourceInner()) .thenMany(springAppDeploymentToCreate.createAsync()) 
.then(updateResourceAsync()); } @Override public Mono<SpringApp> updateResourceAsync() { return manager().serviceClient().getApps().updateAsync( parent().resourceGroupName(), parent().name(), name(), innerModel()) .map(inner -> { setInner(inner); return this; }); } @Override public Mono<Void> deleteResourceAsync() { return manager().serviceClient().getApps().deleteAsync(parent().resourceGroupName(), parent().name(), name()); } @Override protected Mono<AppResourceInner> getInnerAsync() { return manager().serviceClient().getApps().getAsync(parent().resourceGroupName(), parent().name(), name()); } @Override public String id() { return innerModel().id(); } @Override public SpringAppImpl update() { prepareUpdate(); return this; } public AppPlatformManager manager() { return parent().manager(); } @Override public SpringAppImpl withServiceBinding(String name, BindingResourceProperties bindingProperties) { serviceBindings.prepareCreateOrUpdate(name, bindingProperties); return this; } @Override public SpringAppImpl withoutServiceBinding(String name) { serviceBindings.prepareDelete(name); return this; } @Override public SpringAppImpl withDefaultActiveDeployment() { String defaultDeploymentName = "default"; withActiveDeployment(defaultDeploymentName); springAppDeploymentToCreate = deployments().define(defaultDeploymentName) .withExistingSource(UserSourceType.JAR, String.format("<%s>", defaultDeploymentName)); return this; } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithAttach<? 
extends SpringApp.DefinitionStages.WithCreate, T>> SpringAppDeployment.DefinitionStages.Blank<T> defineActiveDeployment(String name) { return (SpringAppDeployment.DefinitionStages.Blank<T>) deployments.define(name); } SpringAppImpl addActiveDeployment(SpringAppDeploymentImpl deployment) { withActiveDeployment(deployment.name()); springAppDeploymentToCreate = deployment; return this; } @Override public SpringAppImpl withConfigurationServiceBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringConfigurationService configurationService = parent().getDefaultConfigurationService(); if (configurationService != null) { Map<String, Object> configurationServiceConfigs = addonConfigs.computeIfAbsent(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY, k -> new HashMap<>()); configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, configurationService.id()); } return this; } @Override public SpringAppImpl withoutConfigurationServiceBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } @Override public SpringAppImpl withServiceRegistryBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry != null) { Map<String, 
Object> serviceRegistryConfigs = addonConfigs.computeIfAbsent(Constants.SERVICE_REGISTRY_KEY, k -> new HashMap<>()); serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, serviceRegistry.id()); } return this; } @Override public SpringAppImpl withoutServiceRegistryBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } }
why we use equalsIgnoreCase?
/**
 * Checks whether this app is bound to the parent service's default Application Configuration Service.
 *
 * @return true if the addon config for the Application Configuration Service exists and its
 *     binding resource id matches the default configuration service's id; false otherwise.
 */
public boolean hasConfigurationServiceBinding() {
    // Guard against an inner model without properties, consistent with the other
    // accessors in this class (isPublic(), url(), ...); previously this could NPE.
    if (innerModel().properties() == null) {
        return false;
    }
    Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
    if (addonConfigs == null) {
        return false;
    }
    SpringConfigurationService configurationService = parent().getDefaultConfigurationService();
    if (configurationService == null) {
        return false;
    }
    Map<String, Object> configurationServiceConfigs =
        addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY);
    // Azure resource IDs are compared case-insensitively, hence equalsIgnoreCase.
    return configurationServiceConfigs != null
        && configurationService.id()
            .equalsIgnoreCase((String) configurationServiceConfigs.get(Constants.BINDING_RESOURCE_ID));
}
&& configurationService.id().equalsIgnoreCase((String) addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY).get(Constants.BINDING_RESOURCE_ID));
/**
 * Checks whether this app is bound to the parent service's default Application Configuration Service.
 *
 * @return true if the addon config for the Application Configuration Service exists and its
 *     binding resource id matches the default configuration service's id; false otherwise.
 */
public boolean hasConfigurationServiceBinding() {
    // Guard against an inner model without properties, consistent with the other
    // accessors in this class (isPublic(), url(), ...); previously this could NPE.
    if (innerModel().properties() == null) {
        return false;
    }
    Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
    if (addonConfigs == null) {
        return false;
    }
    SpringConfigurationService configurationService = parent().getDefaultConfigurationService();
    if (configurationService == null) {
        return false;
    }
    Map<String, Object> configurationServiceConfigs =
        addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY);
    // Azure resource IDs are compared case-insensitively, hence equalsIgnoreCase.
    return configurationServiceConfigs != null
        && configurationService.id()
            .equalsIgnoreCase((String) configurationServiceConfigs.get(Constants.BINDING_RESOURCE_ID));
}
class SpringAppImpl extends ExternalChildResourceImpl<SpringApp, AppResourceInner, SpringServiceImpl, SpringService> implements SpringApp, SpringApp.Definition, SpringApp.Update { private Creatable<SpringAppDeployment> springAppDeploymentToCreate = null; private final SpringAppDeploymentsImpl deployments = new SpringAppDeploymentsImpl(this); private final SpringAppServiceBindingsImpl serviceBindings = new SpringAppServiceBindingsImpl(this); private final SpringAppDomainsImpl domains = new SpringAppDomainsImpl(this); private FunctionalTaskItem setActiveDeploymentTask = null; SpringAppImpl(String name, SpringServiceImpl parent, AppResourceInner innerObject) { super(name, parent, innerObject); } @Override public boolean isPublic() { if (innerModel().properties() == null) { return false; } return innerModel().properties().publicProperty(); } @Override public boolean isHttpsOnly() { if (innerModel().properties() == null) { return false; } return innerModel().properties().httpsOnly(); } @Override public String url() { if (innerModel().properties() == null) { return null; } return innerModel().properties().url(); } @Override public String fqdn() { if (innerModel().properties() == null) { return null; } return innerModel().properties().fqdn(); } @Override public TemporaryDisk temporaryDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().temporaryDisk(); } @Override public PersistentDisk persistentDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().persistentDisk(); } @Override public ManagedIdentityProperties identity() { return innerModel().identity(); } @Override public String activeDeploymentName() { Optional<SpringAppDeployment> deployment = deployments.list().stream().filter(SpringAppDeployment::isActive).findFirst(); return deployment.map(SpringAppDeployment::appName).orElse(null); } @Override public SpringAppDeployment getActiveDeployment() { return 
getActiveDeploymentAsync().block(); } @Override public Mono<SpringAppDeployment> getActiveDeploymentAsync() { return deployments.listAsync().filter(SpringAppDeployment::isActive).singleOrEmpty(); } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithCreate<T>> SpringAppDeployments<T> deployments() { return (SpringAppDeployments<T>) deployments; } @Override public SpringAppServiceBindings serviceBindings() { return serviceBindings; } @Override public SpringAppDomains customDomains() { return domains; } @Override public Mono<ResourceUploadDefinition> getResourceUploadUrlAsync() { return manager().serviceClient().getApps().getResourceUploadUrlAsync( parent().resourceGroupName(), parent().name(), name()); } @Override public ResourceUploadDefinition getResourceUploadUrl() { return getResourceUploadUrlAsync().block(); } private void ensureProperty() { if (innerModel().properties() == null) { innerModel().withProperties(new AppResourceProperties()); } } @Override @Override public boolean hasServiceRegistryBinding() { Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return false; } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry == null) { return false; } return addonConfigs.get(Constants.SERVICE_REGISTRY_KEY) != null && serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID)); } @Override public SpringAppImpl withDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(true); return this; } @Override public SpringAppImpl withoutDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(false); return this; } @Override public SpringAppImpl withCustomDomain(String domain) { domains.prepareCreateOrUpdate(domain, new CustomDomainProperties()); return this; } @Override public 
SpringAppImpl withCustomDomain(String domain, String certThumbprint) { domains.prepareCreateOrUpdate(domain, new CustomDomainProperties().withThumbprint(certThumbprint)); return this; } @Override public Update withoutCustomDomain(String domain) { domains.prepareDelete(domain); return this; } @Override public SpringAppImpl withHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(true); return this; } @Override public SpringAppImpl withoutHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(false); return this; } @Override public SpringAppImpl withTemporaryDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withTemporaryDisk( new TemporaryDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withPersistentDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withPersistentDisk( new PersistentDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withActiveDeployment(String name) { if (CoreUtils.isNullOrEmpty(name)) { return this; } this.setActiveDeploymentTask = context -> manager().serviceClient().getApps() .setActiveDeploymentsAsync(parent().resourceGroupName(), parent().name(), name(), new ActiveDeploymentCollection().withActiveDeploymentNames(Arrays.asList(name))) .then(context.voidMono()); return this; } @Override public void beforeGroupCreateOrUpdate() { if (setActiveDeploymentTask != null) { this.addPostRunDependent(setActiveDeploymentTask); } setActiveDeploymentTask = null; } @Override public Mono<SpringApp> createResourceAsync() { if (springAppDeploymentToCreate == null) { withDefaultActiveDeployment(); } return manager().serviceClient().getApps().createOrUpdateAsync( parent().resourceGroupName(), parent().name(), name(), new AppResourceInner()) .thenMany(springAppDeploymentToCreate.createAsync()) .then(updateResourceAsync()); } @Override public Mono<SpringApp> 
updateResourceAsync() { return manager().serviceClient().getApps().updateAsync( parent().resourceGroupName(), parent().name(), name(), innerModel()) .map(inner -> { setInner(inner); return this; }); } @Override public Mono<Void> deleteResourceAsync() { return manager().serviceClient().getApps().deleteAsync(parent().resourceGroupName(), parent().name(), name()); } @Override protected Mono<AppResourceInner> getInnerAsync() { return manager().serviceClient().getApps().getAsync(parent().resourceGroupName(), parent().name(), name()); } @Override public String id() { return innerModel().id(); } @Override public SpringAppImpl update() { prepareUpdate(); return this; } public AppPlatformManager manager() { return parent().manager(); } @Override public SpringAppImpl withServiceBinding(String name, BindingResourceProperties bindingProperties) { serviceBindings.prepareCreateOrUpdate(name, bindingProperties); return this; } @Override public SpringAppImpl withoutServiceBinding(String name) { serviceBindings.prepareDelete(name); return this; } @Override public SpringAppImpl withDefaultActiveDeployment() { String defaultDeploymentName = "default"; withActiveDeployment(defaultDeploymentName); springAppDeploymentToCreate = deployments().define(defaultDeploymentName) .withExistingSource(UserSourceType.JAR, String.format("<%s>", defaultDeploymentName)); return this; } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithAttach<? 
extends SpringApp.DefinitionStages.WithCreate, T>> SpringAppDeployment.DefinitionStages.Blank<T> defineActiveDeployment(String name) { return (SpringAppDeployment.DefinitionStages.Blank<T>) deployments.define(name); } SpringAppImpl addActiveDeployment(SpringAppDeploymentImpl deployment) { withActiveDeployment(deployment.name()); springAppDeploymentToCreate = deployment; return this; } @Override public SpringAppImpl withConfigurationServiceBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringConfigurationService configurationService = parent().getDefaultConfigurationService(); if (configurationService != null) { Map<String, Object> configurationServiceConfigs = addonConfigs.computeIfAbsent(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY, k -> new HashMap<>()); configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, configurationService.id()); } return this; } @Override public SpringAppImpl withoutConfigurationServiceBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } @Override public SpringAppImpl withServiceRegistryBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry != null) { Map<String, 
Object> serviceRegistryConfigs = addonConfigs.computeIfAbsent(Constants.SERVICE_REGISTRY_KEY, k -> new HashMap<>()); serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, serviceRegistry.id()); } return this; } @Override public SpringAppImpl withoutServiceRegistryBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } }
class SpringAppImpl extends ExternalChildResourceImpl<SpringApp, AppResourceInner, SpringServiceImpl, SpringService> implements SpringApp, SpringApp.Definition, SpringApp.Update { private Creatable<SpringAppDeployment> springAppDeploymentToCreate = null; private final SpringAppDeploymentsImpl deployments = new SpringAppDeploymentsImpl(this); private final SpringAppServiceBindingsImpl serviceBindings = new SpringAppServiceBindingsImpl(this); private final SpringAppDomainsImpl domains = new SpringAppDomainsImpl(this); private FunctionalTaskItem setActiveDeploymentTask = null; SpringAppImpl(String name, SpringServiceImpl parent, AppResourceInner innerObject) { super(name, parent, innerObject); } @Override public boolean isPublic() { if (innerModel().properties() == null) { return false; } return innerModel().properties().publicProperty(); } @Override public boolean isHttpsOnly() { if (innerModel().properties() == null) { return false; } return innerModel().properties().httpsOnly(); } @Override public String url() { if (innerModel().properties() == null) { return null; } return innerModel().properties().url(); } @Override public String fqdn() { if (innerModel().properties() == null) { return null; } return innerModel().properties().fqdn(); } @Override public TemporaryDisk temporaryDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().temporaryDisk(); } @Override public PersistentDisk persistentDisk() { if (innerModel().properties() == null) { return null; } return innerModel().properties().persistentDisk(); } @Override public ManagedIdentityProperties identity() { return innerModel().identity(); } @Override public String activeDeploymentName() { Optional<SpringAppDeployment> deployment = deployments.list().stream().filter(SpringAppDeployment::isActive).findFirst(); return deployment.map(SpringAppDeployment::appName).orElse(null); } @Override public SpringAppDeployment getActiveDeployment() { return 
getActiveDeploymentAsync().block(); } @Override public Mono<SpringAppDeployment> getActiveDeploymentAsync() { return deployments.listAsync().filter(SpringAppDeployment::isActive).singleOrEmpty(); } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithCreate<T>> SpringAppDeployments<T> deployments() { return (SpringAppDeployments<T>) deployments; } @Override public SpringAppServiceBindings serviceBindings() { return serviceBindings; } @Override public SpringAppDomains customDomains() { return domains; } @Override public Mono<ResourceUploadDefinition> getResourceUploadUrlAsync() { return manager().serviceClient().getApps().getResourceUploadUrlAsync( parent().resourceGroupName(), parent().name(), name()); } @Override public ResourceUploadDefinition getResourceUploadUrl() { return getResourceUploadUrlAsync().block(); } private void ensureProperty() { if (innerModel().properties() == null) { innerModel().withProperties(new AppResourceProperties()); } } @Override @Override public boolean hasServiceRegistryBinding() { Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return false; } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry == null) { return false; } return addonConfigs.get(Constants.SERVICE_REGISTRY_KEY) != null && serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID)); } @Override public SpringAppImpl withDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(true); return this; } @Override public SpringAppImpl withoutDefaultPublicEndpoint() { ensureProperty(); innerModel().properties().withPublicProperty(false); return this; } @Override public SpringAppImpl withCustomDomain(String domain) { domains.prepareCreateOrUpdate(domain, new CustomDomainProperties()); return this; } @Override public 
SpringAppImpl withCustomDomain(String domain, String certThumbprint) { domains.prepareCreateOrUpdate(domain, new CustomDomainProperties().withThumbprint(certThumbprint)); return this; } @Override public Update withoutCustomDomain(String domain) { domains.prepareDelete(domain); return this; } @Override public SpringAppImpl withHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(true); return this; } @Override public SpringAppImpl withoutHttpsOnly() { ensureProperty(); innerModel().properties().withHttpsOnly(false); return this; } @Override public SpringAppImpl withTemporaryDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withTemporaryDisk( new TemporaryDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withPersistentDisk(int sizeInGB, String mountPath) { ensureProperty(); innerModel().properties().withPersistentDisk( new PersistentDisk().withSizeInGB(sizeInGB).withMountPath(mountPath)); return this; } @Override public SpringAppImpl withActiveDeployment(String name) { if (CoreUtils.isNullOrEmpty(name)) { return this; } this.setActiveDeploymentTask = context -> manager().serviceClient().getApps() .setActiveDeploymentsAsync(parent().resourceGroupName(), parent().name(), name(), new ActiveDeploymentCollection().withActiveDeploymentNames(Arrays.asList(name))) .then(context.voidMono()); return this; } @Override public void beforeGroupCreateOrUpdate() { if (setActiveDeploymentTask != null) { this.addPostRunDependent(setActiveDeploymentTask); } setActiveDeploymentTask = null; } @Override public Mono<SpringApp> createResourceAsync() { if (springAppDeploymentToCreate == null) { withDefaultActiveDeployment(); } return manager().serviceClient().getApps().createOrUpdateAsync( parent().resourceGroupName(), parent().name(), name(), new AppResourceInner()) .thenMany(springAppDeploymentToCreate.createAsync()) .then(updateResourceAsync()); } @Override public Mono<SpringApp> 
updateResourceAsync() { return manager().serviceClient().getApps().updateAsync( parent().resourceGroupName(), parent().name(), name(), innerModel()) .map(inner -> { setInner(inner); return this; }); } @Override public Mono<Void> deleteResourceAsync() { return manager().serviceClient().getApps().deleteAsync(parent().resourceGroupName(), parent().name(), name()); } @Override protected Mono<AppResourceInner> getInnerAsync() { return manager().serviceClient().getApps().getAsync(parent().resourceGroupName(), parent().name(), name()); } @Override public String id() { return innerModel().id(); } @Override public SpringAppImpl update() { prepareUpdate(); return this; } public AppPlatformManager manager() { return parent().manager(); } @Override public SpringAppImpl withServiceBinding(String name, BindingResourceProperties bindingProperties) { serviceBindings.prepareCreateOrUpdate(name, bindingProperties); return this; } @Override public SpringAppImpl withoutServiceBinding(String name) { serviceBindings.prepareDelete(name); return this; } @Override public SpringAppImpl withDefaultActiveDeployment() { String defaultDeploymentName = "default"; withActiveDeployment(defaultDeploymentName); springAppDeploymentToCreate = deployments().define(defaultDeploymentName) .withExistingSource(UserSourceType.JAR, String.format("<%s>", defaultDeploymentName)); return this; } @Override @SuppressWarnings("unchecked") public <T extends SpringAppDeployment.DefinitionStages.WithAttach<? 
extends SpringApp.DefinitionStages.WithCreate, T>> SpringAppDeployment.DefinitionStages.Blank<T> defineActiveDeployment(String name) { return (SpringAppDeployment.DefinitionStages.Blank<T>) deployments.define(name); } SpringAppImpl addActiveDeployment(SpringAppDeploymentImpl deployment) { withActiveDeployment(deployment.name()); springAppDeploymentToCreate = deployment; return this; } @Override public SpringAppImpl withConfigurationServiceBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringConfigurationService configurationService = parent().getDefaultConfigurationService(); if (configurationService != null) { Map<String, Object> configurationServiceConfigs = addonConfigs.computeIfAbsent(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY, k -> new HashMap<>()); configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, configurationService.id()); } return this; } @Override public SpringAppImpl withoutConfigurationServiceBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } @Override public SpringAppImpl withServiceRegistryBinding() { ensureProperty(); Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { addonConfigs = new HashMap<>(); innerModel().properties().withAddonConfigs(addonConfigs); } SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry(); if (serviceRegistry != null) { Map<String, 
Object> serviceRegistryConfigs = addonConfigs.computeIfAbsent(Constants.SERVICE_REGISTRY_KEY, k -> new HashMap<>()); serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, serviceRegistry.id()); } return this; } @Override public SpringAppImpl withoutServiceRegistryBinding() { if (innerModel().properties() == null) { return this; } Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs(); if (addonConfigs == null) { return this; } Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY); if (configurationServiceConfigs == null) { return this; } configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, ""); return this; } }
All four changes are the same fix: merge the azure-core Context with the Reactor subscriber Context when sending the request. The remaining differences are indentation only.
/**
 * Polls the long-running operation by issuing a GET against the URL stored under
 * {@code PollingConstants.LOCATION} in the polling context.
 *
 * @param pollingContext the mutable context carrying the polling URL and last response body
 * @param pollResponseType the type to deserialize the poll response body into
 * @return a {@link Mono} emitting the poll response with the derived LRO status
 */
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
    HttpRequest pollRequest = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
    // Merge the azure-core Context configured on this strategy into the Reactor subscriber context.
    return FluxUtil.withContext(reactorContext ->
            httpPipeline.send(pollRequest, CoreUtils.mergeContexts(reactorContext, this.context)))
        .flatMap(httpResponse -> {
            // A fresh Location header, when present, supersedes the stored polling URL.
            HttpHeader newLocation = httpResponse.getHeaders().get(PollingConstants.LOCATION);
            if (newLocation != null) {
                pollingContext.setData(PollingConstants.LOCATION, newLocation.getValue());
            }

            // 202 -> still running; other 2xx (200-204) -> done; anything else -> failed.
            LongRunningOperationStatus status;
            if (httpResponse.getStatusCode() == 202) {
                status = LongRunningOperationStatus.IN_PROGRESS;
            } else if (httpResponse.getStatusCode() >= 200 && httpResponse.getStatusCode() <= 204) {
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            } else {
                status = LongRunningOperationStatus.FAILED;
            }

            return httpResponse.getBodyAsByteArray()
                .map(BinaryData::fromBytes)
                .flatMap(body -> {
                    // Remember the raw body so getResult() can fall back to it.
                    pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, body.toString());
                    Duration retryAfter =
                        ImplUtils.getRetryAfterFromHeaders(httpResponse.getHeaders(), OffsetDateTime::now);
                    return PollingUtils.deserializeResponse(body, serializer, pollResponseType)
                        .map(value -> new PollResponse<>(status, value, retryAfter));
                });
        });
}
CoreUtils.mergeContexts(context1, this.context)))
new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return FluxUtil.withContext(context1 -> httpPipeline.send(request, CoreUtils.mergeContexts(context1, this.context))) .flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); Duration retryAfter = ImplUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer(); private static final ClientLogger LOGGER = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; private final Context context; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @throws NullPointerException If {@code httpPipeline} is null. */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, DEFAULT_SERIALIZER, Context.NONE); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @throws NullPointerException If {@code httpPipeline} is null. */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this(httpPipeline, serializer, Context.NONE); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @param context an instance of {@link Context} * @throws NullPointerException If {@code httpPipeline} is null. */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer, Context context) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer; this.context = context == null ? 
Context.NONE : context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { LOGGER.info("Failed to parse Location header into a URL.", e); return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.fromSupplier(() -> new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = ); } @Override public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } 
else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return FluxUtil.withContext(context1 -> httpPipeline.send(request, CoreUtils.mergeContexts(context1, this.context))) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer(); private static final ClientLogger LOGGER = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; private final Context context; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @throws NullPointerException If {@code httpPipeline} is null. */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, DEFAULT_SERIALIZER, Context.NONE); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @throws NullPointerException If {@code httpPipeline} is null. */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this(httpPipeline, serializer, Context.NONE); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @param context an instance of {@link Context} * @throws NullPointerException If {@code httpPipeline} is null. */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer, Context context) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer; this.context = context == null ? 
Context.NONE : context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { LOGGER.info("Failed to parse Location header into a URL.", e); return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.fromSupplier(() -> new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = ); } @Override public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } 
else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return FluxUtil.withContext(context1 -> httpPipeline.send(request, CoreUtils.mergeContexts(context1, this.context))) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } } }
Any reason for this change?
public void checkForRepeatabilityOptions(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "checkForRepeatabilityOptions"); Response<Iterable<SmsSendResult>> response = client.sendWithResponse(FROM_PHONE_NUMBER, Arrays.asList(TO_PHONE_NUMBER, TO_PHONE_NUMBER), MESSAGE, null, Context.NONE); String bodyRequest = StandardCharsets.UTF_8.decode(response.getRequest().getBody().blockLast()).toString(); assertTrue(bodyRequest.contains("repeatabilityRequestId")); assertTrue(bodyRequest.contains("repeatabilityFirstSent")); }
String bodyRequest = StandardCharsets.UTF_8.decode(response.getRequest().getBody().blockLast()).toString();
public void checkForRepeatabilityOptions(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "checkForRepeatabilityOptions"); Response<Iterable<SmsSendResult>> response = client.sendWithResponse(FROM_PHONE_NUMBER, Arrays.asList(TO_PHONE_NUMBER, TO_PHONE_NUMBER), MESSAGE, null, Context.NONE); String bodyRequest = StandardCharsets.UTF_8.decode(response.getRequest().getBody().blockLast()).toString(); assertTrue(bodyRequest.contains("repeatabilityRequestId")); assertTrue(bodyRequest.contains("repeatabilityFirstSent")); }
class SmsClientTests extends SmsTestBase { private SmsClient client; @Override protected void beforeTest() { super.beforeTest(); assumeTrue(shouldEnableSmsTests()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsUsingConnectionString(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsUsingConnectionStringSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsUsingTokenCredential(HttpClient httpClient) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SmsClientBuilder builder = getSmsClientWithToken(httpClient, tokenCredential); client = setupSyncClient(builder, "sendSmsUsingTokenCredentialSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToGroup(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToGroupSync"); Iterable<SmsSendResult> sendResults = client.send(FROM_PHONE_NUMBER, Arrays.asList(TO_PHONE_NUMBER, TO_PHONE_NUMBER), MESSAGE); for (SmsSendResult result : sendResults) { assertHappyPath(result); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToGroupWithOptions(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToGroupWithOptionsSync"); SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); Response<Iterable<SmsSendResult>> sendResults = client.sendWithResponse(FROM_PHONE_NUMBER, Arrays.asList(TO_PHONE_NUMBER, TO_PHONE_NUMBER), 
MESSAGE, options, Context.NONE); for (SmsSendResult result : sendResults.getValue()) { assertHappyPath(result); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToSingleNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToSingleNumberSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToSingleNumberWithOptions(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToSingleNumberWithOptionsSync"); SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE, options); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromFakeNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendFromFakeNumberSync"); try { client.send("+15550000000", TO_PHONE_NUMBER, MESSAGE); } catch (Exception exception) { assertEquals(400, ((HttpResponseException) exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromUnauthorizedNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendFromUnauthorizedNumberSync"); try { SmsSendResult response = client.send("+18007342577", TO_PHONE_NUMBER, MESSAGE); } catch (Exception exception) { assertNotNull(((HttpResponseException) exception).getResponse().getStatusCode()); assertEquals(401, ((HttpResponseException) 
exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToFakePhoneNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendToFakePhoneNumberSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, "+15550000000", MESSAGE); assertFalse(sendResult.isSuccessful()); assertEquals(sendResult.getHttpStatusCode(), 400); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendTwoMessages(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendTwoMessagesSync"); SmsSendResult firstResponse = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); SmsSendResult secondResponse = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertNotEquals(firstResponse.getMessageId(), secondResponse.getMessageId()); assertHappyPath(firstResponse); assertHappyPath(secondResponse); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase private SmsClient setupSyncClient(SmsClientBuilder builder, String testName) { return addLoggingPolicy(builder, testName).buildClient(); } private void assertHappyPath(SmsSendResult sendResult) { assertTrue(sendResult.isSuccessful()); assertEquals(sendResult.getHttpStatusCode(), 202); assertNotNull(sendResult.getMessageId()); } }
class SmsClientTests extends SmsTestBase { private SmsClient client; @Override protected void beforeTest() { super.beforeTest(); assumeTrue(shouldEnableSmsTests()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsUsingConnectionString(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsUsingConnectionStringSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsUsingTokenCredential(HttpClient httpClient) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SmsClientBuilder builder = getSmsClientWithToken(httpClient, tokenCredential); client = setupSyncClient(builder, "sendSmsUsingTokenCredentialSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToGroup(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToGroupSync"); Iterable<SmsSendResult> sendResults = client.send(FROM_PHONE_NUMBER, Arrays.asList(TO_PHONE_NUMBER, TO_PHONE_NUMBER), MESSAGE); for (SmsSendResult result : sendResults) { assertHappyPath(result); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToGroupWithOptions(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToGroupWithOptionsSync"); SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); Response<Iterable<SmsSendResult>> sendResults = client.sendWithResponse(FROM_PHONE_NUMBER, Arrays.asList(TO_PHONE_NUMBER, TO_PHONE_NUMBER), 
MESSAGE, options, Context.NONE); for (SmsSendResult result : sendResults.getValue()) { assertHappyPath(result); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToSingleNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToSingleNumberSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsToSingleNumberWithOptions(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendSmsToSingleNumberWithOptionsSync"); SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE, options); assertHappyPath(sendResult); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromFakeNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendFromFakeNumberSync"); try { client.send("+15550000000", TO_PHONE_NUMBER, MESSAGE); } catch (Exception exception) { assertEquals(400, ((HttpResponseException) exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromUnauthorizedNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendFromUnauthorizedNumberSync"); try { SmsSendResult response = client.send("+18007342577", TO_PHONE_NUMBER, MESSAGE); } catch (Exception exception) { assertNotNull(((HttpResponseException) exception).getResponse().getStatusCode()); assertEquals(401, ((HttpResponseException) 
exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToFakePhoneNumber(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendToFakePhoneNumberSync"); SmsSendResult sendResult = client.send(FROM_PHONE_NUMBER, "+15550000000", MESSAGE); assertFalse(sendResult.isSuccessful()); assertEquals(sendResult.getHttpStatusCode(), 400); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendTwoMessages(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendTwoMessagesSync"); SmsSendResult firstResponse = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); SmsSendResult secondResponse = client.send(FROM_PHONE_NUMBER, TO_PHONE_NUMBER, MESSAGE); assertNotEquals(firstResponse.getMessageId(), secondResponse.getMessageId()); assertHappyPath(firstResponse); assertHappyPath(secondResponse); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase private SmsClient setupSyncClient(SmsClientBuilder builder, String testName) { return addLoggingPolicy(builder, testName).buildClient(); } private void assertHappyPath(SmsSendResult sendResult) { assertTrue(sendResult.isSuccessful()); assertEquals(sendResult.getHttpStatusCode(), 202); assertNotNull(sendResult.getMessageId()); } }
Mind adding a comment here stating that this is done to ensure the class has been loading, triggering the static initializer to bind the accessor, and without this a NullPointerException could occur.
private static void ensureAccessorSet() { if (accessor == null) { BinaryData.fromString(""); } }
if (accessor == null) {
private static void ensureAccessorSet() { if (accessor == null) { BinaryData.fromString(""); } }
class BinaryDataHelper { private static BinaryDataAccessor accessor; /** * Type defining the methods that access private values of {@link BinaryData}. */ public interface BinaryDataAccessor { /** * Creates a new {@link BinaryData} with the given content. * * @param content The {@link BinaryDataContent}. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code content} is null. */ BinaryData createBinaryData(BinaryDataContent content); /** * Gets the {@link BinaryDataContent} that backs the {@link BinaryData}. * * @param binaryData The {@link BinaryData} having its content retrieved. * @return The {@link BinaryDataContent} that backs the {@link BinaryData}. */ BinaryDataContent getContent(BinaryData binaryData); } /** * The method called from {@link BinaryData} to set its accessor. * * @param binaryDataAccessor The accessor. */ public static void setAccessor(final BinaryDataAccessor binaryDataAccessor) { accessor = binaryDataAccessor; } /** * Creates a new {@link BinaryData} with the given content. * * @param content The {@link BinaryDataContent}. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code content} is null. */ public static BinaryData createBinaryData(BinaryDataContent content) { ensureAccessorSet(); return accessor.createBinaryData(content); } /** * Gets the {@link BinaryDataContent} that backs the {@link BinaryData}. * * @param binaryData The {@link BinaryData} having its content retrieved. * @return The {@link BinaryDataContent} that backs the {@link BinaryData}. */ public static BinaryDataContent getContent(BinaryData binaryData) { ensureAccessorSet(); return accessor.getContent(binaryData); } }
class BinaryDataHelper { private static BinaryDataAccessor accessor; /** * Type defining the methods that access private values of {@link BinaryData}. */ public interface BinaryDataAccessor { /** * Creates a new {@link BinaryData} with the given content. * * @param content The {@link BinaryDataContent}. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code content} is null. */ BinaryData createBinaryData(BinaryDataContent content); /** * Gets the {@link BinaryDataContent} that backs the {@link BinaryData}. * * @param binaryData The {@link BinaryData} having its content retrieved. * @return The {@link BinaryDataContent} that backs the {@link BinaryData}. */ BinaryDataContent getContent(BinaryData binaryData); } /** * The method called from {@link BinaryData} to set its accessor. * * @param binaryDataAccessor The accessor. */ public static void setAccessor(final BinaryDataAccessor binaryDataAccessor) { accessor = binaryDataAccessor; } /** * Creates a new {@link BinaryData} with the given content. * * @param content The {@link BinaryDataContent}. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code content} is null. */ public static BinaryData createBinaryData(BinaryDataContent content) { ensureAccessorSet(); return accessor.createBinaryData(content); } /** * Gets the {@link BinaryDataContent} that backs the {@link BinaryData}. * * @param binaryData The {@link BinaryData} having its content retrieved. * @return The {@link BinaryDataContent} that backs the {@link BinaryData}. */ public static BinaryDataContent getContent(BinaryData binaryData) { ensureAccessorSet(); return accessor.getContent(binaryData); } /** * The success of setting up accessor depends on the order in which classes are loaded. * This method ensures that if accessor hasn't been set we force-load BinaryData class * which in turns populates the accessor. */ }
nit: ```suggestion return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); ```
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } if (length != null && length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } if (bufferContent && length != null && length > MAX_ARRAY_SIZE) { return monoError(LOGGER, new IllegalArgumentException( String.format("'length' cannot be greater than %d when content buffering is enabled.", MAX_ARRAY_SIZE))); } if (bufferContent) { if (length != null) { return FluxUtil.collectBytesInByteBufferStream(data, length.intValue()) .flatMap(bytes -> Mono.just(BinaryData.fromBytes(bytes))); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(BinaryData.fromBytes(bytes))); } else { return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } }
return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } if (length != null && length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } if (bufferContent && length != null && length > MAX_ARRAY_SIZE) { return monoError(LOGGER, new IllegalArgumentException( String.format("'length' cannot be greater than %d when content buffering is enabled.", MAX_ARRAY_SIZE))); } if (bufferContent) { if (length != null) { return FluxUtil.collectBytesInByteBufferStream(data, length.intValue()) .flatMap(bytes -> Mono.just(BinaryData.fromBytes(bytes))); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(BinaryData.fromBytes(bytes))); } else { return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } static { BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() { @Override public BinaryData createBinaryData(BinaryDataContent content) { return new BinaryData(content); } @Override public BinaryDataContent getContent(BinaryData binaryData) { return binaryData.content; } }); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStream * <pre> * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * BinaryData binaryData = BinaryData.fromStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. 
* <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync * <pre> * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromStreamAsync& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <p>This method aggregates data into single byte array.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. 
*/ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { return fromFlux(data, null); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <p>This method aggregates data into single byte array.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final long length = data.length; * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) { return fromFlux(data, length, true); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. 
* * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final long length = data.length; * final boolean shouldAggregateData = false; * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or * consumption deferred. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromString * <pre> * final String data = &quot;Some Data&quot;; * & * BinaryData binaryData = BinaryData.fromString& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. 
* <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array * outside of this instance will result in the contents of this BinaryData instance being updated as well. To * safely update the byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromBytes * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. *</p> * <p><strong>Creating an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * BinaryData binaryData = BinaryData.fromObject& * * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. 
* @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * Disposable subscriber = BinaryData.fromObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * final ObjectSerializer serializer = * new MyJsonSerializer& * Disposable subscriber = BinaryData.fromObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile --> * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile --> * * @param file The {@link Path} that will be the {@link BinaryData} data. 
* @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended * to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. 
*/ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. 
* @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * & * & * & * & * & * * * BinaryData binaryData = BinaryData.fromObject& * * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end 
com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. 
* @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * & * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. 
* @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * 
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
        return toObjectAsync(typeReference, SERIALIZER);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
     * {@link ObjectSerializer}. Each time this method is called the content is deserialized and a new instance of
     * type {@code T} is returned, so calling this method repeatedly to convert the underlying data source into the
     * same type is not recommended.
     * <p>
     * The type, represented by {@link Class}, should be a non-generic class; for generic types use the
     * {@link TypeReference} overload instead.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
     * own implementation.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
        return toObjectAsync(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
     * {@link ObjectSerializer}. Each time this method is called the content is deserialized and a new instance of
     * type {@code T} is returned, so calling this method repeatedly to convert the underlying data source into the
     * same type is not recommended.
     * <p>
     * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
     * generic create a sub-type of {@link TypeReference}.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
     * own implementation.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
        // Deserialization is deferred until the returned Mono is subscribed to.
        return Mono.fromCallable(() -> toObject(typeReference, serializer));
    }

    /**
     * Returns an {@link InputStream} representation of this {@link BinaryData}.
     *
     * @return An {@link InputStream} representing the {@link BinaryData}.
     */
    public InputStream toStream() {
        return content.toStream();
    }

    /**
     * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
     * <p>
     * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}.
     *
     * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
     */
    public ByteBuffer toByteBuffer() {
        return content.toByteBuffer();
    }

    /**
     * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The
     * content is not read from the underlying data source until the {@link Flux} is subscribed to.
     *
     * @return The content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
     */
    public Flux<ByteBuffer> toFluxByteBuffer() {
        return content.toFluxByteBuffer();
    }

    /**
     * Returns the length of the content, if it is known. The length can be {@code null} if the source did not
     * specify the length or the length cannot be determined without reading the whole content.
     *
     * @return The length of the content, if it is known.
     */
    public Long getLength() {
        return content.getLength();
    }
}
class BinaryData {
    private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);

    // Default serializer used by the no-serializer overloads (fromObject/toObject/etc.).
    static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);

    // Presumably the largest array the JVM can safely allocate (header overhead) — TODO confirm.
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    // Underlying content source; every to*/from* operation delegates to this.
    private final BinaryDataContent content;

    BinaryData(BinaryDataContent content) {
        this.content = Objects.requireNonNull(content, "'content' cannot be null.");
    }

    static {
        // Registers the accessor so internal code can wrap/unwrap BinaryDataContent
        // without it being exposed on the public API surface.
        BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
            @Override
            public BinaryData createBinaryData(BinaryDataContent content) {
                return new BinaryData(content);
            }

            @Override
            public BinaryDataContent getContent(BinaryData binaryData) {
                return binaryData.content;
            }
        });
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
     * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The
     * stream content is not cached if the stream is not read into a format that requires the content to be fully
     * read into memory.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static BinaryData fromStream(InputStream inputStream) {
        return new BinaryData(new InputStreamContent(inputStream));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
        // Stream consumption is deferred until the returned Mono is subscribed to.
        return Mono.fromCallable(() -> fromStream(inputStream));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
     * <p>
     * This method aggregates data into a single byte array.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
        // No known length; the overload tolerates a null length.
        return fromFlux(data, null);
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
     * <p>
     * This method aggregates data into a single byte array.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws IllegalArgumentException if the length is less than zero.
     * @throws NullPointerException if {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
        // true = buffer the Flux content eagerly.
        return fromFlux(data, length, true);
    }

    // NOTE(review): a javadoc for a fromFlux(Flux<ByteBuffer>, Long, boolean bufferContent) overload appeared
    // here, but the method itself is not present in this chunk even though the two-argument overload calls it —
    // confirm it exists elsewhere in the full source.

    /**
     * Creates an instance of {@link BinaryData} from the given {@link String}.
     * <p>
     * The {@link String} is converted into bytes — presumably using {@code StandardCharsets.UTF_8}, per the
     * original javadoc's reference to {@code StandardCharsets} (TODO confirm, the charset name was garbled).
     *
     * @param data The {@link String} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link String}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromString(String data) {
        return new BinaryData(new StringContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given byte array.
     * <p>
     * Note that the input byte array is used as a reference by this instance of {@link BinaryData}: any changes to
     * the byte array outside of this instance will be reflected in this BinaryData's content as well. To safely
     * update the byte array without impacting the BinaryData instance, perform an array copy first.
     *
     * @param data The byte array that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the byte array.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromBytes(byte[] data) {
        return new BinaryData(new ByteArrayContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
     * {@link JsonSerializer}.
     * <p>
     * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If
     * no implementation is found, a default Jackson-based implementation will be used to serialize the object.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the JSON serialized object.
     * @throws NullPointerException If {@code data} is null.
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data) {
        return fromObject(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
     * {@link JsonSerializer}.
     * <p>
     * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If
     * no implementation is found, a default Jackson-based implementation will be used to serialize the object.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data) {
        return fromObjectAsync(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
     * {@link ObjectSerializer}.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
     * your own implementation.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
     * determines how {@code null} data is serialized.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
        return new BinaryData(new SerializableContent(data, serializer));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
     * {@link ObjectSerializer}.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
     * your own implementation.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
     * determines how {@code null} data is serialized.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
        // Serialization is deferred until the returned Mono is subscribed to.
        return Mono.fromCallable(() -> fromObject(data, serializer));
    }

    /**
     * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method
     * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
     * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     */
    public static BinaryData fromFile(Path file) {
        // STREAM_READ_SIZE is the default chunk size; declared elsewhere in this file.
        return fromFile(file, STREAM_READ_SIZE);
    }

    /**
     * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
     * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
     * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @param chunkSize The requested size for each read of the path.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
     * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
     * @throws UncheckedIOException if the file does not exist.
     */
    public static BinaryData fromFile(Path file, int chunkSize) {
        return new BinaryData(new FileContent(file, chunkSize));
    }

    /**
     * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the
     * underlying byte array: modifying the contents of the returned byte array will also change the content of
     * this BinaryData instance. If the content source of this BinaryData instance is a file, an InputStream or a
     * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended to
     * make a copy of the contents first.
     *
     * @return A byte array representing this {@link BinaryData}.
*/ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. 
     * @see JsonSerializer
     */
    public <T> T toObject(Class<T> clazz) {
        return toObject(TypeReference.createInstance(clazz), SERIALIZER);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * default {@link JsonSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
     * generic create a sub-type of {@link TypeReference}.
     * <p>
     * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath.
     * If no implementation is found, a default Jackson-based implementation will be used to deserialize the object.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     * @see JsonSerializer
     */
    public <T> T toObject(TypeReference<T> typeReference) {
        return toObject(typeReference, SERIALIZER);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * passed {@link ObjectSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link Class}, should be a non-generic class; for generic types use the
     * {@link TypeReference} overload instead.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
     * your own implementation.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
        return toObject(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * passed {@link ObjectSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
     * generic create a sub-type of {@link TypeReference}.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
     * your own implementation.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
        // All other toObject overloads funnel into this one; validate both arguments here.
        Objects.requireNonNull(typeReference, "'typeReference' cannot be null.");
        Objects.requireNonNull(serializer, "'serializer' cannot be null.");
        return content.toObject(typeReference, serializer);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * default {@link JsonSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link Class}, should be a non-generic class; for generic types use the
     * {@link TypeReference} overload instead.
     * <p>
     * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath.
     * If no implementation is found, a default Jackson-based implementation will be used to deserialize the object.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} is null.
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz) {
        return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * default {@link JsonSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
     * generic create a sub-type of {@link TypeReference}.
     * <p>
     * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath.
     * If no implementation is found, a default Jackson-based implementation will be used to deserialize the object.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
        return toObjectAsync(typeReference, SERIALIZER);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * passed {@link ObjectSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link Class}, should be a non-generic class; for generic types use the
     * {@link TypeReference} overload instead.
     * <p>
     * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
     * your own implementation.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
        return toObjectAsync(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the
     * passed {@link ObjectSerializer}. Each time this method is called the content is deserialized and a new
     * instance of type {@code T} is returned, so calling this method repeatedly to convert the underlying data
     * source into the same type is not recommended.
     * <p>
     * The type, represented by {@link TypeReference}, can either be a generic or non-generic type.
If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toStream --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromStream& * final byte[] bytes = new byte[data.length]; * binaryData.toStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toStream --> * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * <!-- src_embed com.azure.util.BinaryData.toByteBuffer --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * final byte[] bytes = new byte[data.length]; * binaryData.toByteBuffer& * System.out.println& * </pre> * <!-- end com.azure.util.BinaryData.toByteBuffer --> * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. 
*/ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
Delay the default to `HttpPipelineProvider`. So that code here would now have effect https://github.com/Azure/azure-sdk-for-java/blob/90775e584eb5bae2f8bb929090440ac24ddb6d95/sdk/resourcemanager/azure-resourcemanager-resources/src/main/java/com/azure/resourcemanager/resources/fluentcore/arm/implementation/AzureConfigurableImpl.java#L142-L144 (previously `retryPolicy` variable will not be null, and hence `retryOptions` have no effect)
protected AzureConfigurableImpl() { policies = new ArrayList<>(); scopes = new ArrayList<>(); tokens = new ArrayList<>(); httpLogOptions = new HttpLogOptions().setLogLevel(HttpLogDetailLevel.NONE); }
}
protected AzureConfigurableImpl() { policies = new ArrayList<>(); scopes = new ArrayList<>(); tokens = new ArrayList<>(); httpLogOptions = new HttpLogOptions().setLogLevel(HttpLogDetailLevel.NONE); }
class AzureConfigurableImpl<T extends AzureConfigurable<T>> implements AzureConfigurable<T> { private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final List<HttpPipelinePolicy> policies; private final List<String> scopes; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private Configuration configuration; private final List<TokenCredential> tokens; @Override @SuppressWarnings("unchecked") public T withLogOptions(HttpLogOptions httpLogOptions) { Objects.requireNonNull(httpLogOptions); this.httpLogOptions = httpLogOptions; return (T) this; } @Override @SuppressWarnings("unchecked") public T withLogLevel(HttpLogDetailLevel logLevel) { Objects.requireNonNull(logLevel); this.httpLogOptions = httpLogOptions.setLogLevel(logLevel); return (T) this; } @Override @SuppressWarnings("unchecked") public T withPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return (T) this; } @Override @SuppressWarnings("unchecked") public T withAuxiliaryCredential(TokenCredential token) { Objects.requireNonNull(token); this.tokens.add(token); return (T) this; } @Override @SuppressWarnings("unchecked") public T withAuxiliaryCredentials(List<TokenCredential> tokens) { Objects.requireNonNull(tokens); this.tokens.addAll(tokens); return (T) this; } @Override @SuppressWarnings("unchecked") public T withRetryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy); this.retryPolicy = retryPolicy; return (T) this; } @Override @SuppressWarnings("unchecked") public T withScope(String scope) { Objects.requireNonNull(scope); this.scopes.add(scope); return (T) this; } @Override @SuppressWarnings("unchecked") public T withScopes(List<String> scopes) { Objects.requireNonNull(scopes); this.scopes.addAll(scopes); return (T) this; } @Override @SuppressWarnings("unchecked") public T withHttpClient(HttpClient httpClient) { Objects.requireNonNull(httpClient); this.httpClient = httpClient; return (T) this; } @Override 
@SuppressWarnings("unchecked") public T withConfiguration(Configuration configuration) { Objects.requireNonNull(configuration); this.configuration = configuration; return (T) this; } @Override @SuppressWarnings("unchecked") public T withRetryOptions(RetryOptions retryOptions) { Objects.requireNonNull(retryOptions); this.retryOptions = retryOptions; return (T) this; } protected HttpPipeline buildHttpPipeline(TokenCredential credential, AzureProfile profile) { Objects.requireNonNull(credential); if (!tokens.isEmpty()) { policies.add( new AuxiliaryAuthenticationPolicy(profile.getEnvironment(), tokens.toArray(new TokenCredential[0]))); } if (this.retryPolicy == null && this.retryOptions != null) { this.retryPolicy = new RetryPolicy(this.retryOptions); } return HttpPipelineProvider.buildHttpPipeline(credential, profile, scopes(), httpLogOptions, configuration, retryPolicy, policies, httpClient); } private String[] scopes() { return scopes.isEmpty() ? null : scopes.toArray(new String[0]); } }
class AzureConfigurableImpl<T extends AzureConfigurable<T>> implements AzureConfigurable<T> { private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final List<HttpPipelinePolicy> policies; private final List<String> scopes; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private Configuration configuration; private final List<TokenCredential> tokens; @Override @SuppressWarnings("unchecked") public T withLogOptions(HttpLogOptions httpLogOptions) { Objects.requireNonNull(httpLogOptions); this.httpLogOptions = httpLogOptions; return (T) this; } @Override @SuppressWarnings("unchecked") public T withLogLevel(HttpLogDetailLevel logLevel) { Objects.requireNonNull(logLevel); this.httpLogOptions = httpLogOptions.setLogLevel(logLevel); return (T) this; } @Override @SuppressWarnings("unchecked") public T withPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return (T) this; } @Override @SuppressWarnings("unchecked") public T withAuxiliaryCredential(TokenCredential token) { Objects.requireNonNull(token); this.tokens.add(token); return (T) this; } @Override @SuppressWarnings("unchecked") public T withAuxiliaryCredentials(List<TokenCredential> tokens) { Objects.requireNonNull(tokens); this.tokens.addAll(tokens); return (T) this; } @Override @SuppressWarnings("unchecked") public T withRetryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy); this.retryPolicy = retryPolicy; return (T) this; } @Override @SuppressWarnings("unchecked") public T withScope(String scope) { Objects.requireNonNull(scope); this.scopes.add(scope); return (T) this; } @Override @SuppressWarnings("unchecked") public T withScopes(List<String> scopes) { Objects.requireNonNull(scopes); this.scopes.addAll(scopes); return (T) this; } @Override @SuppressWarnings("unchecked") public T withHttpClient(HttpClient httpClient) { Objects.requireNonNull(httpClient); this.httpClient = httpClient; return (T) this; } @Override 
@SuppressWarnings("unchecked") public T withConfiguration(Configuration configuration) { Objects.requireNonNull(configuration); this.configuration = configuration; return (T) this; } @Override @SuppressWarnings("unchecked") public T withRetryOptions(RetryOptions retryOptions) { Objects.requireNonNull(retryOptions); this.retryOptions = retryOptions; return (T) this; } protected HttpPipeline buildHttpPipeline(TokenCredential credential, AzureProfile profile) { Objects.requireNonNull(credential); if (!tokens.isEmpty()) { policies.add( new AuxiliaryAuthenticationPolicy(profile.getEnvironment(), tokens.toArray(new TokenCredential[0]))); } if (this.retryPolicy == null && this.retryOptions != null) { this.retryPolicy = new RetryPolicy(this.retryOptions); } return HttpPipelineProvider.buildHttpPipeline(credential, profile, scopes(), httpLogOptions, configuration, retryPolicy, policies, httpClient); } private String[] scopes() { return scopes.isEmpty() ? null : scopes.toArray(new String[0]); } }
Which class will handle this exception?
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); } if (response == null) { builder.down(); } else { builder.up().withDetail("database", response.getProperties().getId()); } }catch (Exception e) { if (e instanceof NotFoundException) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } else { throw e; } } } }
throw e;
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Does `resourceLoader` use `blobServiceAsyncClient` to sent request?
protected void doHealthCheck(Health.Builder builder) { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); } else { builder.withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); try { resourceLoader.getResource("azure-blob: builder.up(); } catch (Exception e) { builder.down(); } } }
resourceLoader.getResource("azure-blob:
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private final ResourceLoader resourceLoader; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * * @param blobServiceAsyncClient the blob service client * @param resourceLoader the resource loader */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient, ResourceLoader resourceLoader) { this.blobServiceAsyncClient = blobServiceAsyncClient; this.resourceLoader = resourceLoader; } @Override }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
why not use the sdk api directly?
protected void doHealthCheck(Health.Builder builder) { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); } else { builder.withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); try { resourceLoader.getResource("azure-blob: builder.up(); } catch (Exception e) { builder.down(); } } }
resourceLoader.getResource("azure-blob:
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private final ResourceLoader resourceLoader; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * * @param blobServiceAsyncClient the blob service client * @param resourceLoader the resource loader */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient, ResourceLoader resourceLoader) { this.blobServiceAsyncClient = blobServiceAsyncClient; this.resourceLoader = resourceLoader; } @Override }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Context is immutable.
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
return CoreUtils.mergeContexts(this.getContext(), context);
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
Can we use the `Status.UNKNOWN` and put this information in the builder.withDetail, just like how https://github.com/Azure/azure-sdk-for-java/blob/5bc550c9a5de4f8ee93a5b3141500ee34be3850d/sdk/spring/spring-cloud-azure-actuator/src/main/java/com/azure/spring/cloud/actuator/eventhubs/EventHubsHealthIndicator.java#L39 does.
protected void doHealthCheck(Builder builder) throws Exception { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", response.getProperties().getId()); } else { builder.down(); } } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } } }
builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!");
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
How about letting `LOGGER.info(...)` and `builder.up().withDetail()` output the same information. Use `database` instead of `response.getProperties().getId()`.
protected void doHealthCheck(Builder builder) throws Exception { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", response.getProperties().getId()); } else { builder.down(); } } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } } }
builder.up().withDetail("database", response.getProperties().getId());
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
It's better to put `builder.up()` at the beginning.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
builder.up().withDetail("database", database);
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
I would prefer to return in this if
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
} else {
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
I don't think we should catch `NotFoundException` here, the Javadoc of `certificateAsyncClient.getCertificateWithResponse("spring-cloud-azure-not-existing-certificate")` says `@throws ResourceNotFoundException when a certificate with {@code certificateName} doesn't exist in the key vault.`, but there's no such thing for the `cosmosAsyncClient.getDatabase(database).read()`, and `NotFoundException` is from the implementation package.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } }
} catch (NotFoundException e) {
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Is it enough for us to construct a `getBlobContainerAsyncClient`, do we really need to construct a `BlobAsyncClient`?
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } try { BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( "spring-cloud-azure-not-existing-container"); BlobAsyncClient blobAsyncClient = containerAsyncClient.getBlobAsyncClient( "spring-cloud-azure-not-existing-blob"); builder.withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); BlobRange range = new BlobRange(0, (long) 2); DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3); Mono<BlobDownloadAsyncResponse> response = blobAsyncClient.downloadStreamWithResponse( range, options, null, false); response.block(timeout); builder.up(); } catch (BlobStorageException e) { builder.up(); } }
"spring-cloud-azure-not-existing-blob");
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Let's keep it as a `database`, I am not sure there will someone who uses this detailed `database` in their monitoring system, so this would be like a public API.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("Database", database); } else { builder.down(); } }
.withDetail("Database", database);
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Please don't use `assert` in the source code.
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); assert exists != null; builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
assert exists != null;
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
can we start by exposing as few as possible details? Let's only keep the URL_FIELD
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); Objects.requireNonNull(exists, "Error occurred checking the container existence!"); builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
.withDetail("statusCode", exists.getStatusCode());
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Should we use the URL_FIELD here?
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } }
.withDetail("CosmosUri", endpoint)
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Let's remove RUs for now, and we can add it if there's a customer ask.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } }
.withDetail("RUs", response.getRequestCharge())
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Consider parsing a date and comparing result OR validate that data provide inside the string is really a date, (i.e. I could write `@PlaybackOnly(expiryTime = "0")` to game the system).
private void validateExpiryTime(PlaybackOnly annotation) { String expiryStr = annotation.expiryTime(); if ("".equals(expiryStr)) { return; } OffsetDateTime now = OffsetDateTime.now(ZoneId.of(ZoneId.SHORT_IDS.get("PST"))); String nowStr = now.getYear() + "/" + String.format("%02d", now.getMonthValue()) + "/" + String.format("%02d", now.getDayOfMonth()); if (expiryStr.compareTo(nowStr) < 0) { throw new RuntimeException("PlaybackOnly has expired. Test must be reenabled"); } }
if (expiryStr.compareTo(nowStr) < 0) {
private void validateExpiryTime(PlaybackOnly annotation) { String expiryStr = annotation.expiryTime(); if ("".equals(expiryStr)) { return; } OffsetDateTime expiry = LocalDate.parse(expiryStr, DateTimeFormatter.ofPattern("yyyy-MM-dd")).atTime(0, 0) .atZone(ZoneId.of(ZoneId.SHORT_IDS.get("PST"))).toOffsetDateTime(); OffsetDateTime now = OffsetDateTime.now(ZoneId.of(ZoneId.SHORT_IDS.get("PST"))); if (now.isAfter(expiry)) { throw new RuntimeException("PlaybackOnly has expired. Test must be reenabled"); } }
class PlaybackOnlyExtension implements IAnnotationDrivenExtension<PlaybackOnly> { @Override public void visitFeatureAnnotation(PlaybackOnly annotation, FeatureInfo feature) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { feature.skip(String.format("Test ignored in %s mode", testMode)); } } @Override public void visitSpecAnnotation(PlaybackOnly annotation, SpecInfo spec) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { spec.skip(String.format("Test ignored in %s mode", testMode)); } } }
class PlaybackOnlyExtension implements IAnnotationDrivenExtension<PlaybackOnly> { @Override public void visitFeatureAnnotation(PlaybackOnly annotation, FeatureInfo feature) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { feature.skip(String.format("Test ignored in %s mode", testMode)); } } @Override public void visitSpecAnnotation(PlaybackOnly annotation, SpecInfo spec) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { spec.skip(String.format("Test ignored in %s mode", testMode)); } } }
Why do we need `totalLength`? Do we set the cache capacity based on the total number of chars in the cache?
int getTotalLength() { return totalLength; }
}
int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
*/ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
* TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
It looks like the decision to evict is based on the number of entries in the cache. So, how is `totalLength` used?
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
final boolean removingEntry = size() > capacity;
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
Have we perf tested the impact of locking in Event Hubs scenario where every event will do a getSchema for deserializing the event?
Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); }
synchronized (lock) {
Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. 
* * @return The length of schemas stored in cache. */ int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } } /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. 
*/ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. 
* * @return The length of schemas stored in cache. */ int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } } /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. * TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. 
*/ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
Is there a constant netty has that we can use here instead of a string literal?
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler());
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
class NettyAsyncHttpClient implements HttpClient { private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, 
connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> 
Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { FileContent fileContent = (FileContent) bodyContent; return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength()); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. 
*/ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. */ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
class NettyAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class); private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> 
addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof 
SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } private static NettyOutbound sendFile( HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) { if (restRequest.getUrl().getProtocol().equals("https")) { return reactorNettyOutbound.sendUsing( () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ), (c, fc) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } try { return new ChunkedNioFile( fc, 0, fileContent.getLength(), fileContent.getChunkSize()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }, (fc) -> { try { fc.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }); } else { return reactorNettyOutbound.sendFile( fileContent.getFile(), 0, fileContent.getLength()); } } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. 
* @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. */ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. 
*/ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
What should be our guidance on when this stream can be safely closed by the user?
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
class NettyAsyncHttpClient implements HttpClient { private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, 
connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> 
Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { FileContent fileContent = (FileContent) bodyContent; return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength()); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. 
*/ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. */ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
class NettyAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class); private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> 
addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof 
SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } private static NettyOutbound sendFile( HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) { if (restRequest.getUrl().getProtocol().equals("https")) { return reactorNettyOutbound.sendUsing( () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ), (c, fc) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } try { return new ChunkedNioFile( fc, 0, fileContent.getLength(), fileContent.getChunkSize()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }, (fc) -> { try { fc.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }); } else { return reactorNettyOutbound.sendFile( fileContent.getFile(), 0, fileContent.getLength()); } } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. 
* @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. */ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. 
*/ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
What happens in the case of retry and the `InputStream` is not replayable?
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
bodyContent::toStream,
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
class NettyAsyncHttpClient implements HttpClient { private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, 
connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> 
Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { FileContent fileContent = (FileContent) bodyContent; return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength()); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. 
*/ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. */ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
class NettyAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class); private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> 
addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof 
SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } private static NettyOutbound sendFile( HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) { if (restRequest.getUrl().getProtocol().equals("https")) { return reactorNettyOutbound.sendUsing( () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ), (c, fc) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } try { return new ChunkedNioFile( fc, 0, fileContent.getLength(), fileContent.getChunkSize()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }, (fc) -> { try { fc.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }); } else { return reactorNettyOutbound.sendFile( fileContent.getFile(), 0, fileContent.getLength()); } } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. 
* @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. */ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. 
*/ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
```suggestion // If the x-ms-date header exists, ignore the Date header. ```
/**
 * Builds the string-to-sign used for SharedKeyLite authorization.
 *
 * @param requestUrl The URL the request is going to.
 * @param headers The headers of the request.
 * @param credential The named key credential used to resolve the account name.
 * @return The string to sign: the date value, a newline, then the canonicalized resource.
 */
private static String buildStringToSign(URL requestUrl, HttpHeaders headers, AzureNamedKeyCredential credential) {
    // If the x-ms-date header exists, the Date header is signed as an empty value.
    final String dateValue;
    if (headers.get("x-ms-date") != null) {
        dateValue = "";
    } else {
        dateValue = getStandardHeaderValue(headers, "Date");
    }
    return dateValue + "\n" + getCanonicalizedResource(requestUrl, credential);
}
/**
 * Builds the string-to-sign used for SharedKeyLite authorization.
 *
 * @param requestUrl The URL the request is going to.
 * @param headers The headers of the request.
 * @param credential The named key credential used to resolve the account name.
 * @return The string to sign: the date value, a newline, then the canonicalized resource.
 */
private static String buildStringToSign(URL requestUrl, HttpHeaders headers, AzureNamedKeyCredential credential) {
    // If the x-ms-date header exists, ignore the Date header (sign an empty value instead).
    String dateHeader = headers.get("x-ms-date") != null
        ? ""
        : getStandardHeaderValue(headers, "Date");
    return String.join("\n", dateHeader, getCanonicalizedResource(requestUrl, credential));
}
class TableAzureNamedKeyCredentialPolicy implements HttpPipelinePolicy { private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKeyLite %s:%s"; private final AzureNamedKeyCredential credential; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential The SharedKey credential used to create the policy. */ public TableAzureNamedKeyCredentialPolicy(AzureNamedKeyCredential credential) { this.credential = credential; } /** * Authorizes a {@link com.azure.core.http.HttpRequest} with the SharedKey credential. * * @param context The context of the request. * @param next The next policy in the pipeline. * * @return A reactive result containing the HTTP response. */ public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String authorizationValue = generateAuthorizationHeader(context.getHttpRequest().getUrl(), context.getHttpRequest().getHeaders()); context.getHttpRequest().setHeader("Authorization", authorizationValue); return next.process(); } /** * Generates the Auth Headers * * @param requestUrl The URL which the request is going to. * @param headers The headers of the request. * * @return The auth header */ String generateAuthorizationHeader(URL requestUrl, HttpHeaders headers) { String signature = computeHmac256(this.credential.getAzureNamedKey().getKey(), buildStringToSign(requestUrl, headers, this.credential)); return String.format(AUTHORIZATION_HEADER_FORMAT, this.credential.getAzureNamedKey().getName(), signature); } /** * Creates the String to Sign. * * @param requestUrl The Url which the request is going to. * @param headers The headers of the request. * * @return A string to sign for the request. */ /** * Returns a header value or an empty string if said value is {@code null}. * * @param headers The request headers. * @param headerName The name of the header to get the value for. * * @return The standard header for the given name. 
*/ private static String getStandardHeaderValue(HttpHeaders headers, String headerName) { final Header header = headers.get(headerName); return header == null ? "" : header.getValue(); } /** * Returns the canonicalized resource needed for a request. * * @param requestUrl The URL of the request. * * @return The string that is the canonicalized resource. */ private static String getCanonicalizedResource(URL requestUrl, AzureNamedKeyCredential credential) { StringBuilder canonicalizedResource = new StringBuilder("/").append(credential.getAzureNamedKey().getName()); if (requestUrl.getPath().length() > 0) { canonicalizedResource.append(requestUrl.getPath()); } else { canonicalizedResource.append('/'); } if (requestUrl.getQuery() != null) { Map<String, String[]> queryParams = parseQueryStringSplitValues(requestUrl.getQuery()); String[] queryParamValues = queryParams.get("comp"); if (queryParamValues != null) { Arrays.sort(queryParamValues); canonicalizedResource.append("?comp=") .append(String.join(",", queryParamValues)); } } return canonicalizedResource.toString(); } /** * Get the {@link AzureNamedKeyCredential} linked to the policy. * * @return The {@link AzureNamedKeyCredential}. */ public AzureNamedKeyCredential getCredential() { return credential; } }
class TableAzureNamedKeyCredentialPolicy implements HttpPipelinePolicy { private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKeyLite %s:%s"; private final AzureNamedKeyCredential credential; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential The SharedKey credential used to create the policy. */ public TableAzureNamedKeyCredentialPolicy(AzureNamedKeyCredential credential) { this.credential = credential; } /** * Authorizes a {@link com.azure.core.http.HttpRequest} with the SharedKey credential. * * @param context The context of the request. * @param next The next policy in the pipeline. * * @return A reactive result containing the HTTP response. */ public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String authorizationValue = generateAuthorizationHeader(context.getHttpRequest().getUrl(), context.getHttpRequest().getHeaders()); context.getHttpRequest().setHeader("Authorization", authorizationValue); return next.process(); } /** * Generates the Auth Headers * * @param requestUrl The URL which the request is going to. * @param headers The headers of the request. * * @return The auth header */ String generateAuthorizationHeader(URL requestUrl, HttpHeaders headers) { String signature = computeHmac256(this.credential.getAzureNamedKey().getKey(), buildStringToSign(requestUrl, headers, this.credential)); return String.format(AUTHORIZATION_HEADER_FORMAT, this.credential.getAzureNamedKey().getName(), signature); } /** * Creates the String to Sign. * * @param requestUrl The Url which the request is going to. * @param headers The headers of the request. * * @return A string to sign for the request. */ /** * Returns a header value or an empty string if said value is {@code null}. * * @param headers The request headers. * @param headerName The name of the header to get the value for. * * @return The standard header for the given name. 
*/ private static String getStandardHeaderValue(HttpHeaders headers, String headerName) { final Header header = headers.get(headerName); return header == null ? "" : header.getValue(); } /** * Returns the canonicalized resource needed for a request. * * @param requestUrl The URL of the request. * * @return The string that is the canonicalized resource. */ private static String getCanonicalizedResource(URL requestUrl, AzureNamedKeyCredential credential) { StringBuilder canonicalizedResource = new StringBuilder("/").append(credential.getAzureNamedKey().getName()); if (requestUrl.getPath().length() > 0) { canonicalizedResource.append(requestUrl.getPath()); } else { canonicalizedResource.append('/'); } if (requestUrl.getQuery() != null) { Map<String, String[]> queryParams = parseQueryStringSplitValues(requestUrl.getQuery()); String[] queryParamValues = queryParams.get("comp"); if (queryParamValues != null) { Arrays.sort(queryParamValues); canonicalizedResource.append("?comp=") .append(String.join(",", queryParamValues)); } } return canonicalizedResource.toString(); } /** * Get the {@link AzureNamedKeyCredential} linked to the policy. * * @return The {@link AzureNamedKeyCredential}. */ public AzureNamedKeyCredential getCredential() { return credential; } }
I'm wondering if, for now, it'd be better to always go down the code path above, so that this stays consistent and easier to debug while we introduce this change.
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content; may be {@code null} for an empty body.
 * @param headers The headers associated with the original request (used for Content-Type).
 * @return The Mono emitting the okhttp request body.
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
    // No body: send an empty payload with the resolved media type.
    if (bodyContent == null) {
        return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType));
    }
    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already in memory; wrap them directly.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    } else if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer toBytes() until subscription since encoding/serialization may be non-trivial.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    } else if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType));
    } else if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType));
    } else {
        // Flux-backed content: stream when the length is unknown or exceeds the buffering threshold,
        // otherwise aggregate the Flux into a ByteString first.
        // NOTE(review): consider always using the streaming path for consistency and easier
        // debugging while this change is introduced -- TODO confirm.
        OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody(
            content, headers, mediaType, writeTimeout);
        if (fluxRequestBody.contentLength() < 0
            || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) {
            return Mono.just(fluxRequestBody);
        } else {
            return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
        }
    }
}
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content; may be {@code null} for an empty body.
 * @param headers The headers associated with the original request (used for Content-Type
 * and, if needed, Content-Length).
 * @return The Mono emitting the okhttp request body.
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
    // No body: reuse the shared empty request body.
    if (bodyContent == null) {
        return EMPTY_REQUEST_BODY_MONO;
    }
    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already in memory; wrap them directly.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    } else if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer toBytes() until subscription since encoding/serialization may be non-trivial.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    } else {
        // Streaming paths: resolve the length from the content or the Content-Length header.
        long effectiveContentLength = getRequestContentLength(content, headers);
        if (content instanceof InputStreamContent) {
            return Mono.just(new OkHttpInputStreamRequestBody(
                (InputStreamContent) content, effectiveContentLength, mediaType));
        } else if (content instanceof FileContent) {
            return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType));
        } else {
            // Flux-backed content streams under the client's call timeout.
            return Mono.just(new OkHttpFluxRequestBody(
                content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis()));
        }
    }
}
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Is this timeout in the right place? I'd have expected the write timeout to be applied closer to the network layer, or around the `sink.write` above. Maybe ```java Mono<BufferedSink> requestSendMono = content.toFluxByteBuffer() .publishOn(Schedulers.boundedElastic()) .timeout(writeTimeout) .reduce(....); ```
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<BufferedSink> requestSendMono = content.toFluxByteBuffer() .publishOn(Schedulers.boundedElastic()) .reduce(bufferedSink, (sink, buffer) -> { try { while (buffer.hasRemaining()) { sink.write(buffer); } return sink; } catch (IOException e) { throw Exceptions.propagate(e); } }); if (writeTimeout != null) { requestSendMono.block(writeTimeout); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
requestSendMono.block(writeTimeout);
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<Void> requestSendMono = content.toFluxByteBuffer() .flatMapSequential(buffer -> { if (Schedulers.isInNonBlockingThread()) { return Mono.just(buffer) .publishOn(Schedulers.boundedElastic()) .map(b -> writeBuffer(bufferedSink, b)) .then(); } else { writeBuffer(bufferedSink, buffer); return Mono.empty(); } }, 1, 1) .then(); if (callTimeoutMillis > 0) { /* * Default call timeout (in milliseconds). By default there is no timeout for complete calls, but * there is for the connection, write, and read actions within a call. */ requestSendMono.block(Duration.ofMillis(callTimeoutMillis)); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final Duration writeTimeout; public OkHttpFluxRequestBody( BinaryDataContent content, HttpHeaders httpHeaders, MediaType mediaType, Duration writeTimeout) { super(content, httpHeaders, mediaType); this.writeTimeout = writeTimeout; } @Override }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final int callTimeoutMillis; public OkHttpFluxRequestBody( BinaryDataContent content, long effectiveContentLength, MediaType mediaType, int callTimeoutMillis) { super(content, effectiveContentLength, mediaType); this.callTimeoutMillis = callTimeoutMillis; } @Override private ByteBuffer writeBuffer(BufferedSink sink, ByteBuffer buffer) { try { while (buffer.hasRemaining()) { sink.write(buffer); } return buffer; } catch (IOException e) { throw Exceptions.propagate(e); } } }
If this path is removed, could this API become non-reactive?
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, writeTimeout); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Does `RequestBody` copy the `byte[]` contents? If it does we should turn this into a `Mono.fromCallable` just in the off chance the reactive stream gets cancelled before the `RequestBody` is used.
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, httpClient); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
return Mono.just(RequestBody.create(content.toBytes(), mediaType));
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Shouldn't this be pushed into the if block and the if inspection be performed on the content directly?
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, httpClient); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody(
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
is this conventional to use `Context.NONE` as a value even though it's a different type from what `SPAN_CONTEXT_KEY` normally holds?
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventContext = eventData.getContext() .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE) .addData(ENTITY_PATH_KEY, this.entityPath) .addData(HOST_NAME_KEY, this.hostname); eventContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, eventContext, ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventContext.getData(SPAN_CONTEXT_KEY).orElse(Context.NONE)); } } return eventData; }
eventData.addContext(SPAN_CONTEXT_KEY, eventContext.getData(SPAN_CONTEXT_KEY).orElse(Context.NONE));
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventContext = eventData.getContext() .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE) .addData(ENTITY_PATH_KEY, this.entityPath) .addData(HOST_NAME_KEY, this.hostname); eventContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, eventContext, ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventContext, Signal.complete()); Object spanContext = eventContext.getData(SPAN_CONTEXT_KEY).orElse(null); if (spanContext != null) { eventData.addContext(SPAN_CONTEXT_KEY, spanContext); } } } return eventData; }
class EventDataBatch { private static final ClientLogger LOGGER = new ClientLogger(EventDataBatch.class); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the maximum size, in bytes, of the {@link EventDataBatch}. * * @return The maximum size, in bytes, of the {@link EventDataBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * <p>This method is not thread-safe; make sure to synchronize the method access when using multiple threads * to add events.</p> * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. 
* @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw LOGGER.logExceptionAsWarning(new NullPointerException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw LOGGER.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private static Message createAmqpMessage(EventData event, String partitionKey) { final AmqpAnnotatedMessage amqpAnnotatedMessage = event.getRawAmqpMessage(); final Message protonJ = MessageUtils.toProtonJMessage(amqpAnnotatedMessage); if (partitionKey == null) { return protonJ; } if (protonJ.getMessageAnnotations() == null) { protonJ.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); } final MessageAnnotations messageAnnotations = protonJ.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); return protonJ; } }
class EventDataBatch { private static final ClientLogger LOGGER = new ClientLogger(EventDataBatch.class); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the maximum size, in bytes, of the {@link EventDataBatch}. * * @return The maximum size, in bytes, of the {@link EventDataBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * <p>This method is not thread-safe; make sure to synchronize the method access when using multiple threads * to add events.</p> * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. 
* @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw LOGGER.logExceptionAsWarning(new NullPointerException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw LOGGER.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private static Message createAmqpMessage(EventData event, String partitionKey) { final AmqpAnnotatedMessage amqpAnnotatedMessage = event.getRawAmqpMessage(); final Message protonJ = MessageUtils.toProtonJMessage(amqpAnnotatedMessage); if (partitionKey == null) { return protonJ; } if (protonJ.getMessageAnnotations() == null) { protonJ.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); } final MessageAnnotations messageAnnotations = protonJ.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); return protonJ; } }
Are we changing the logger level to `debug` because we expect this race condition to be hit more often?
public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; }
logger.debug("Initializing PartitionKeyAccessor...");
public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
Wouldn't setting the accessor have the same race condition issue that was with getter?
public static void setCosmosClientBuilderAccessor(final CosmosClientBuilderAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("CosmosClientBuilderAccessor already initialized!"); } else { logger.info("Setting CosmosClientBuilderAccessor..."); cosmosClientBuilderClassLoaded.set(true); } }
cosmosClientBuilderClassLoaded.set(true);
public static void setCosmosClientBuilderAccessor(final CosmosClientBuilderAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("CosmosClientBuilderAccessor already initialized!"); } else { logger.info("Setting CosmosClientBuilderAccessor..."); cosmosClientBuilderClassLoaded.set(true); } }
class CosmosClientBuilderHelper { private static final AtomicReference<CosmosClientBuilderAccessor> accessor = new AtomicReference<>(); private static final AtomicBoolean cosmosClientBuilderClassLoaded = new AtomicBoolean(false); private CosmosClientBuilderHelper() {} public static CosmosClientBuilderAccessor getCosmosClientBuilderAccessor() { if (!cosmosClientBuilderClassLoaded.get()) { logger.debug("Initializing CosmosClientBuilderAccessor..."); CosmosClientBuilder.doNothingButEnsureLoadingClass(); } CosmosClientBuilderAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("CosmosClientBuilderAccessor is not initialized yet!"); System.exit(9700); } return snapshot; } public interface CosmosClientBuilderAccessor { void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder, CosmosClientMetadataCachesSnapshot metadataCache); CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder); void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType); ApiType getCosmosClientApiType(CosmosClientBuilder builder); ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder); Configs getConfigs(CosmosClientBuilder builder); ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder); } }
class CosmosClientBuilderHelper { private static final AtomicReference<CosmosClientBuilderAccessor> accessor = new AtomicReference<>(); private static final AtomicBoolean cosmosClientBuilderClassLoaded = new AtomicBoolean(false); private CosmosClientBuilderHelper() {} public static CosmosClientBuilderAccessor getCosmosClientBuilderAccessor() { if (!cosmosClientBuilderClassLoaded.get()) { logger.debug("Initializing CosmosClientBuilderAccessor..."); CosmosClientBuilder.doNothingButEnsureLoadingClass(); } CosmosClientBuilderAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("CosmosClientBuilderAccessor is not initialized yet!"); System.exit(9700); } return snapshot; } public interface CosmosClientBuilderAccessor { void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder, CosmosClientMetadataCachesSnapshot metadataCache); CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder); void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType); ApiType getCosmosClientApiType(CosmosClientBuilder builder); ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder); Configs getConfigs(CosmosClientBuilder builder); ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder); } }
If we are going to turn on warning logs, we probably want to provide a code or environmental toggle to disable them so the customer doesn't get noisy logs if they can't immediately upgrade and operate at large scale. Will coordinate offline for a well-defined strategy here.
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using this builder"); } this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
+ "the version be set to v2 using this builder");
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using the constructor"); } /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private 
AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. 
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private BlobAsyncClient getUnencryptedBlobClient() { BlobClientBuilder builder = new BlobClientBuilder() .endpoint(endpoint) .containerName(containerName) .blobName(blobName) .snapshot(snapshot) .customerProvidedKey( customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey())) .encryptionScope(encryptionScope == null ? 
null : encryptionScope.getEncryptionScope()) .versionId(versionId) .serviceVersion(version) .httpClient(httpClient); if (storageSharedKeyCredential != null) { builder.credential(storageSharedKeyCredential); } else if (tokenCredential != null) { builder.credential(tokenCredential); } else if (azureSasCredential != null) { builder.credential(azureSasCredential); } else if (sasToken != null) { builder.credential(new AzureSasCredential(sasToken)); } return builder.buildAsyncClient(); } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy || currPolicy instanceof FetchEncryptionVersionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); policies.add(0, new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId() : logOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() 
.policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. 
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Named-key credentials are adapted to the shared-key code path.
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
     * identity and authentication documentation for more details on proper usage of the {@link TokenCredential} type.
     * Clears any shared key credential or SAS token previously configured.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service.
     * Clears any shared key or token credential previously configured.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters
     * (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears the credential used to authorize the request.
     *
     * <p>This is for blobs that are publicly accessible.</p>
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        // A connection string may embed either an account key or a SAS token; wire up whichever
        // authentication mechanism is present.
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parses it for information (SAS token,
container name, blob name) * * <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p> * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName()); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version * of the blob name. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName, "'blobName' cannot be null."))); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the version identifier of the blob. * * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder versionId(String versionId) { this.versionId = versionId; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { LOGGER.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        // The policy's declared position determines whether it runs once per logical call or
        // on every retry attempt.
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
     * the service. If a {@code logLevel} is not provided, a default value of {@link HttpLogDetailLevel} NONE is used.
     *
     * <p><strong>Note:</strong> If an {@link HttpPipeline} is specified, it takes precedence over all other
     * HttpTrait APIs, which are then ignored.</p>
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
     * and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
     */
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during building of the client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the request retry options for all the requests made through the client.
     *
     * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
     *
     * @param retryOptions {@link RequestRetryOptions}.
     * @return the updated EncryptedBlobClientBuilder object.
     */
    public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link RetryOptions} for all the requests made through the client.
     *
     * <p><strong>Note:</strong> If an {@link HttpPipeline} is specified, it takes precedence over all other
     * HttpTrait APIs, which are then ignored.</p>
     * <p>
     * Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}.
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
        this.coreRetryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link HttpPipeline} to use for the service client.
     *
     * <p><strong>Note:</strong> If an {@link HttpPipeline} is specified, it takes precedence over all other
     * HttpTrait APIs, which are then ignored.</p>
     * <p>
     * The {@link #endpoint(String) endpoint} is not ignored when {@code pipeline} is set.
     *
     * @param httpPipeline The {@link HttpPipeline} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
        if (this.httpPipeline != null && httpPipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.httpPipeline = httpPipeline;
        return this;
    }

    /**
     * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
     * recommended that this method be called with an instance of the {@link HttpClientOptions}
     * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
     * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
     * interface.
     *
     * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs.
     * In particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait,
     * and they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
     * based on the settings provided to this trait.</p>
     *
     * @param clientOptions A configured instance of {@link HttpClientOptions}.
     * @see HttpClientOptions
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code clientOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link BlobServiceVersion} that is used when making API requests.
     * <p>
     * If a service version is not provided, the service version that will be used will be the latest known service
     * version based on the version of the client library being used. If no service version is specified, updating to a
     * newer version of the client library will have the result of potentially moving to a newer service version.
     * <p>
     * Targeting a specific service version may also mean that the service will return an error for newer APIs.
     *
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption version for this client. For any new workloads, using version 2 or above is highly * encouraged as version 1 uses AES/CBC, which is no longer considered secure. For compatibility reasons, the * default value is version 1. * @param version The encryption version. * @return The updated builder. */ public EncryptedBlobClientBuilder encryptionVersion(EncryptionVersion version) { this.encryptionVersion = version; return this; } }
/**
 * Fluent builder for creating encrypted blob clients.
 *
 * <p>NOTE(review): as visible in this chunk the declaration reads {@code class} with no access modifier
 * (package-private); confirm against the full file whether a {@code public} modifier precedes it.</p>
 */
class EncryptedBlobClientBuilder implements
    TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>,
    AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>,
    HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>,
    EndpointTrait<EncryptedBlobClientBuilder> {
    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);

    // Cryptography package name/version, read from the artifact's properties file for telemetry.
    private static final Map<String, String> PROPERTIES =
        CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Base blob package name/version for the User-Agent header. NOTE(review): USER_AGENT_PROPERTIES
    // is declared outside this chunk (presumably a static import) — confirm against the full file.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION =
        USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");

    // Blob identity.
    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;

    // Client-side encryption behavior.
    private boolean requiresEncryption;
    private EncryptionVersion encryptionVersion;

    // Authentication — at most one of these is non-null when the client is built.
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;

    // HTTP pipeline configuration.
    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;

    // Key-wrapping configuration for client-side encryption.
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;

    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     *
     * @deprecated Use {@link EncryptedBlobClientBuilder#EncryptedBlobClientBuilder(EncryptionVersion)} and select an
     * encryption version explicitly.
     */
    @Deprecated
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder with the chosen encryption protocol version.
     *
     * @param version The version of the client side encryption protocol to use. It is highly recommended that v2 be
     * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a
     * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
     */
    public EncryptedBlobClientBuilder(EncryptionVersion version) {
        Objects.requireNonNull(version);
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = version;
    }

    /**
     * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
     * (The garbled embedded code sample from the original javadoc was removed here.)
     *
     * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
     * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
     * and {@link #retryOptions(RequestRetryOptions)} have been set.
     */
    public EncryptedBlobClient buildEncryptedBlobClient() {
        // The synchronous client is a thin wrapper over the asynchronous one.
        return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
    }

    // Rebuilds the given pipeline with a BlobUserAgentModificationPolicy inserted immediately
    // after each UserAgentPolicy so the cryptography package is identified in the User-Agent.
    // NOTE(review): in the original chunk this method was preceded by a javadoc block describing
    // buildEncryptedBlobAsyncClient, whose method body is not visible here — confirm against the
    // full file that buildEncryptedBlobAsyncClient exists elsewhere.
    private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();

        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = pipeline.getPolicy(i);
            policies.add(currPolicy);
            if (currPolicy instanceof UserAgentPolicy) {
                policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
            }
        }

        return new HttpPipelineBuilder()
            .httpClient(pipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    // Builds a plain (non-encrypting) async client over the same blob with the same transport
    // settings; used by the decryption policy.
    private BlobAsyncClient getUnencryptedBlobClient() {
        BlobClientBuilder builder = new BlobClientBuilder()
            .endpoint(endpoint)
            .containerName(containerName)
            .blobName(blobName)
            .snapshot(snapshot)
            .customerProvidedKey(customerProvidedKey == null
                ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey()))
            .encryptionScope(encryptionScope == null ? null : encryptionScope.getEncryptionScope())
            .versionId(versionId)
            .serviceVersion(version)
            .pipeline(this.httpPipeline)
            .httpClient(httpClient)
            .configuration(configuration)
            .retryOptions(this.retryOptions)
            .clientOptions(this.clientOptions);

        if (storageSharedKeyCredential != null) {
            builder.credential(storageSharedKeyCredential);
        } else if (tokenCredential != null) {
            builder.credential(tokenCredential);
        } else if (azureSasCredential != null) {
            builder.credential(azureSasCredential);
        } else if (sasToken != null) {
            builder.credential(new AzureSasCredential(sasToken));
        }

        for (HttpPipelinePolicy policy : perCallPolicies) {
            builder.addPolicy(policy);
        }
        for (HttpPipelinePolicy policy : perRetryPolicies) {
            builder.addPolicy(policy);
        }

        return builder.buildAsyncClient();
    }

    // Constructs the HttpPipeline the encrypted client will use. If the caller supplied a
    // pipeline, the decryption policy is prepended to it; otherwise a full pipeline is built
    // from the individual builder options.
    private HttpPipeline getHttpPipeline() {
        CredentialValidator.validateSingleCredentialIsPresent(
            storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);

        // Prefer the user-provided pipeline, but refuse one that is already decrypting.
        if (httpPipeline != null) {
            List<HttpPipelinePolicy> policies = new ArrayList<>();
            // NOTE(review): decryptionPolicyPresent is assigned but never used — candidate for removal.
            boolean decryptionPolicyPresent = false;
            for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
                HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
                if (currPolicy instanceof BlobDecryptionPolicy) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                        + " configured for encryption/decryption in a way that might conflict with the passed key "
                        + "information. Please ensure that the passed pipeline is not already configured for "
                        + "encryption/decryption"));
                }
                policies.add(currPolicy);
            }
            // Decryption must run first so it sees the raw response body.
            policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
                getUnencryptedBlobClient()));

            return new HttpPipelineBuilder()
                .httpClient(httpPipeline.getHttpClient())
                .policies(policies.toArray(new HttpPipelinePolicy[0]))
                .build();
        }

        Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;

        // Standard Azure SDK policy order, with decryption first.
        List<HttpPipelinePolicy> policies = new ArrayList<>();

        policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
            getUnencryptedBlobClient()));

        // The application id from ClientOptions takes precedence over the one in HttpLogOptions.
        String applicationId = clientOptions.getApplicationId() != null
            ? clientOptions.getApplicationId() : logOptions.getApplicationId();
        policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration));
        policies.add(new RequestIdPolicy());

        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);

        policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));

        policies.add(new AddDatePolicy());

        // Custom headers supplied through ClientOptions are applied to every request.
        HttpHeaders headers = new HttpHeaders();
        clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue()));
        if (headers.getSize() > 0) {
            policies.add(new AddHeadersPolicy(headers));
        }

        policies.add(new MetadataValidationPolicy());

        // Exactly one credential form is expected here (validated above).
        if (storageSharedKeyCredential != null) {
            policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
        } else if (tokenCredential != null) {
            // Bearer tokens must only ever be sent over TLS.
            BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
            policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
        } else if (azureSasCredential != null) {
            policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
        } else if (sasToken != null) {
            policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
        }

        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);

        // Verify the service echoed back the client request id and encryption key hash we sent.
        policies.add(new ResponseValidationPolicyBuilder()
            .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
            .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
            .build());

        policies.add(new HttpLoggingPolicy(logOptions));

        policies.add(new ScrubEtagPolicy());

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /**
     * Sets the encryption key parameters for the client.
     *
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption
     * key.
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the encryption parameters for this client.
     *
     * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    // Enforces the key-configuration contract: at least one of key/keyResolver must be set,
    // and a key is only usable together with a wrap algorithm.
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
     * Clears any token credential or SAS token previously configured.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureNamedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, 
container name, blob name) * * <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p> * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName()); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version * of the blob name. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName, "'blobName' cannot be null."))); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the version identifier of the blob. * * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder versionId(String versionId) { this.versionId = versionId; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { LOGGER.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage allowlist log headers and query parameters. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. */ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } }
Why is this info? Should we do debug for this as well?
public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } }
logger.info("Setting PartitionKeyAccessor...");
public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
Do we want to resolve this default before the warning check above?
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using this builder"); } this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion;
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using the constructor"); } /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private 
AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. 
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private BlobAsyncClient getUnencryptedBlobClient() { BlobClientBuilder builder = new BlobClientBuilder() .endpoint(endpoint) .containerName(containerName) .blobName(blobName) .snapshot(snapshot) .customerProvidedKey( customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey())) .encryptionScope(encryptionScope == null ? 
null : encryptionScope.getEncryptionScope()) .versionId(versionId) .serviceVersion(version) .httpClient(httpClient); if (storageSharedKeyCredential != null) { builder.credential(storageSharedKeyCredential); } else if (tokenCredential != null) { builder.credential(tokenCredential); } else if (azureSasCredential != null) { builder.credential(azureSasCredential); } else if (sasToken != null) { builder.credential(new AzureSasCredential(sasToken)); } return builder.buildAsyncClient(); } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy || currPolicy instanceof FetchEncryptionVersionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); policies.add(0, new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId() : logOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() 
.policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. 
* @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, 
container name, blob name) * * <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p> * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName()); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version * of the blob name. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName, "'blobName' cannot be null."))); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the version identifier of the blob. * * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder versionId(String versionId) { this.versionId = versionId; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { LOGGER.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage allowlist log headers and query parameters. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. */ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption version for this client. For any new workloads, using version 2 or above is highly * encouraged as version 1 uses AES/CBC, which is no longer considered secure. For compatibility reasons, the * default value is version 1. * @param version The encryption version. * @return The updated builder. */ public EncryptedBlobClientBuilder encryptionVersion(EncryptionVersion version) { this.encryptionVersion = version; return this; } }
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> {

    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);

    // Name/version of this library, read from its properties file; used for user-agent construction.
    private static final Map<String, String> PROPERTIES =
        CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // NOTE(review): USER_AGENT_PROPERTIES is not declared in this class; presumably a static import of the base
    // blob package's user-agent properties -- confirm it resolves in the full file.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION,
        "UnknownVersion");

    // Blob identity: parsed from the endpoint URL by endpoint(String) or set individually.
    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;

    // Client-side encryption settings.
    private boolean requiresEncryption;
    private EncryptionVersion encryptionVersion;

    // At most one credential type may be active; the credential setters null out the competing ones.
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;

    // HTTP pipeline configuration.
    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;

    // Key-wrapping configuration: the key (or resolver) that wraps/unwraps the content encryption key.
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;

    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     *
     * @deprecated Use {@link EncryptedBlobClientBuilder#EncryptedBlobClientBuilder(EncryptionVersion)} to specify
     * the encryption protocol version explicitly.
     */
    @Deprecated
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder with the given encryption protocol version.
     *
     * @param version The version of the client side encryption protocol to use. It is highly recommended that v2 be
     * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a
     * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
     */
    public EncryptedBlobClientBuilder(EncryptionVersion version) {
        Objects.requireNonNull(version);
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = version;
    }

    /**
     * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient -->
     * <pre>
     * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder&#40;&#41;
     *     .key&#40;key, keyWrapAlgorithm&#41;
     *     .keyResolver&#40;keyResolver&#41;
     *     .connectionString&#40;connectionString&#41;
     *     .buildEncryptedBlobAsyncClient&#40;&#41;;
     * </pre>
     * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient -->
     *
     * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
* @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private BlobAsyncClient getUnencryptedBlobClient() { BlobClientBuilder builder = new BlobClientBuilder() .endpoint(endpoint) .containerName(containerName) .blobName(blobName) .snapshot(snapshot) .customerProvidedKey( customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey())) .encryptionScope(encryptionScope == null ? 
null : encryptionScope.getEncryptionScope()) .versionId(versionId) .serviceVersion(version) .pipeline(this.httpPipeline) .httpClient(httpClient) .configuration(configuration) .retryOptions(this.retryOptions) .clientOptions(this.clientOptions); if (storageSharedKeyCredential != null) { builder.credential(storageSharedKeyCredential); } else if (tokenCredential != null) { builder.credential(tokenCredential); } else if (azureSasCredential != null) { builder.credential(azureSasCredential); } else if (sasToken != null) { builder.credential(new AzureSasCredential(sasToken)); } for (HttpPipelinePolicy policy : perCallPolicies) { builder.addPolicy(policy); } for (HttpPipelinePolicy policy : perRetryPolicies) { builder.addPolicy(policy); } return builder.buildAsyncClient(); } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption, getUnencryptedBlobClient())); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption, getUnencryptedBlobClient())); String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId() : logOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) 
.build();
    }

    /**
     * Sets the encryption key parameters for the client.
     *
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption
     * key
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the encryption parameters for this client.
     *
     * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    // Validates that a usable encryption configuration exists: at least one of key/keyResolver, and a wrap
    // algorithm whenever a key is present.
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Clear competing credential types so only one is active.
        // NOTE(review): azureSasCredential is not cleared here (unlike in sasToken/credential(TokenCredential));
        // confirm that is intentional.
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureNamedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
     * <a href="https://aka.ms/azsdk/java/docs/identity">identity and authentication</a>
     * documentation for more details on proper usage of the {@link TokenCredential} type.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters
     * (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears the credential used to authorize the request.
     *
     * <p>This is for blobs that are publicly accessible.</p>
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER
                .logExceptionAsError(new IllegalArgumentException(
                    "connectionString missing required settings to derive blob service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        // Apply whichever auth mechanism the connection string carried (account key or SAS).
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parses it for information (SAS token,
container name, blob name).
     *
     * <p>If the blob name contains special characters, pass in the url encoded version of the blob name.</p>
     *
     * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
     * as the container name. With only one path element, it is impossible to distinguish between a container name and a
     * blob in the root container, so it is assumed to be the container name as this is much more common. When working
     * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
     * separately using the {@link EncryptedBlobClientBuilder#blobName(String) blobName} method.</p>
     *
     * @param endpoint URL of the service
     * @return the updated EncryptedBlobClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
     */
    @Override
    public EncryptedBlobClientBuilder endpoint(String endpoint) {
        try {
            URL url = new URL(endpoint);
            BlobUrlParts parts = BlobUrlParts.parse(url);

            this.accountName = parts.getAccountName();
            this.endpoint = BuilderHelper.getEndpoint(parts);
            // Keep any previously configured container/blob name when the URL does not carry one.
            this.containerName =
                parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
            this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName());
            this.snapshot = parts.getSnapshot();
            this.versionId = parts.getVersionId();

            // A SAS embedded in the URL becomes the active credential.
            String sasToken = parts.getCommonSasQueryParameters().encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the name of the container that contains the blob.
     *
     * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
     * will be used.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version
     * of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        // Decode then re-encode so both raw and pre-encoded names end up consistently encoded.
        this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
            "'blobName' cannot be null.")));
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
     *
     * <p><strong>Note:</strong> this HttpTrait setting is ignored if an {@link HttpPipeline} is set via
     * {@link #pipeline(HttpPipeline)}; otherwise it participates in the internally constructed pipeline.</p>
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
     *
     * <p><strong>Note:</strong> this HttpTrait setting is ignored if an {@link HttpPipeline} is set via
     * {@link #pipeline(HttpPipeline)}; otherwise it participates in the internally constructed pipeline.</p>
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        // Per-call policies run once per request; all other positions run on every retry.
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
     * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel#NONE} is set.
     *
     * <p><strong>Note:</strong> this HttpTrait setting is ignored if an {@link HttpPipeline} is set via
     * {@link #pipeline(HttpPipeline)}; otherwise it participates in the internally constructed pipeline.</p>
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
     * and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
     */
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during building of the client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the request retry options for all the requests made through the client.
     *
     * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
     *
     * @param retryOptions {@link RequestRetryOptions}.
     * @return the updated EncryptedBlobClientBuilder object.
     */
    public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link RetryOptions} for all the requests made through the client.
     *
     * <p><strong>Note:</strong> this HttpTrait setting is ignored if an {@link HttpPipeline} is set via
     * {@link #pipeline(HttpPipeline)}; otherwise it participates in the internally constructed pipeline.</p>
     * <p>
     * Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}.
     * Consider using {@link #retryOptions(RequestRetryOptions)} to also set storage specific options.
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
        this.coreRetryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link HttpPipeline} to use for the service client.
     *
     * <p><strong>Note:</strong> when a pipeline is set it takes precedence over the other HttpTrait settings on
     * this builder, which are then ignored.</p>
     * <p>
     * The {@link #endpoint(String) endpoint} is not ignored when {@code pipeline} is set.
     *
     * @param httpPipeline The {@link HttpPipeline} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
        if (this.httpPipeline != null && httpPipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.httpPipeline = httpPipeline;
        return this;
    }

    /**
     * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
     * recommended that this method be called with an instance of the {@link HttpClientOptions}
     * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
     * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
     * interface.
     *
     * <p><strong>Note:</strong> this HttpTrait setting is ignored if an {@link HttpPipeline} is set via
     * {@link #pipeline(HttpPipeline)}; otherwise it participates in the internally constructed pipeline.</p>
     *
     * @param clientOptions A configured instance of {@link HttpClientOptions}.
     * @see HttpClientOptions
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code clientOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link BlobServiceVersion} that is used when making API requests.
     * <p>
     * If a service version is not provided, the service version that will be used will be the latest known service
     * version based on the version of the client library being used. If no service version is specified, updating to a
     * newer version of the client library will have the result of potentially moving to a newer service version.
     * <p>
     * Targeting a specific service version may also mean that the service will return an error for newer APIs.
     *
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
*
     * @param customerProvidedKey {@link CustomerProvidedKey}
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
        if (customerProvidedKey == null) {
            this.customerProvidedKey = null;
        } else {
            // Translate the public CPK type into the wire-level CpkInfo model.
            this.customerProvidedKey = new CpkInfo()
                .setEncryptionKey(customerProvidedKey.getKey())
                .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
                .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
        }
        return this;
    }

    /**
     * Sets the {@code encryption scope} that is used to encrypt blob contents on the server.
     *
     * @param encryptionScope Encryption scope containing the encryption key information.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
        if (encryptionScope == null) {
            this.encryptionScope = null;
        } else {
            this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
        }
        return this;
    }

    /**
     * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
     * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
     * pipeline should not already be configured for encryption/decryption.
     *
     * <p>If {@code pipeline} is set, all other settings are ignored, aside from
     * {@link EncryptedBlobClientBuilder#endpoint(String) endpoint} and
     * {@link EncryptedBlobClientBuilder#serviceVersion(BlobServiceVersion) serviceVersion}.</p>
     *
     * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
     * encryption scope properties from the provided client. To set CPK, please use
     * {@link EncryptedBlobClientBuilder#customerProvidedKey(CustomerProvidedKey)}; to set the encryption scope, use
     * {@link EncryptedBlobClientBuilder#encryptionScope(String)}.
     *
     * @param blobClient BlobClient used to configure the builder.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobClient} is {@code null}.
     */
    public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
        Objects.requireNonNull(blobClient);
        return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
    }

    /**
     * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
     * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
     * pipeline should not already be configured for encryption/decryption.
     *
     * <p>If {@code pipeline} is set, all other settings are ignored, aside from
     * {@link EncryptedBlobClientBuilder#endpoint(String) endpoint} and
     * {@link EncryptedBlobClientBuilder#serviceVersion(BlobServiceVersion) serviceVersion}.</p>
     *
     * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
     * encryption scope properties from the provided client. To set CPK, please use
     * {@link EncryptedBlobClientBuilder#customerProvidedKey(CustomerProvidedKey)}; to set the encryption scope, use
     * {@link EncryptedBlobClientBuilder#encryptionScope(String)}.
     *
     * @param blobAsyncClient BlobAsyncClient used to configure the builder.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
     */
    public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
        Objects.requireNonNull(blobAsyncClient);
        return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
            blobAsyncClient.getServiceVersion());
    }

    /**
     * Helper method to transform a regular client into an encrypted client.
     *
     * @param httpPipeline {@link HttpPipeline}
     * @param endpoint The endpoint.
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
        this.endpoint(endpoint);
        this.serviceVersion(version);
        return this.pipeline(httpPipeline);
    }

    /**
     * Sets the requires encryption option.
     *
     * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is
     * downloaded and it is not encrypted.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
        this.requiresEncryption = requiresEncryption;
        return this;
    }
}
If this is a multi-request process, it would be a good idea to have the orchestrator prefetch the EncryptionData once and set it into the Context, so that each individual request does not have to fetch it again.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
for GA: we should introduce BlobRequestCondition(HttpHeaders) ctor.
private BlobRequestConditions extractRequestConditionsFromRequest(HttpHeaders requestHeaders) { return new BlobRequestConditions() .setLeaseId(requestHeaders.getValue("x-ms-lease-id")) .setIfUnmodifiedSince(requestHeaders.getValue("If-Unmodified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Unmodified-Since")).getDateTime()) .setIfNoneMatch(requestHeaders.getValue("If-None-Match")) .setIfMatch(requestHeaders.getValue("If-Match")) .setIfModifiedSince(requestHeaders.getValue("If-Modified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Modified-Since")).getDateTime()); }
return new BlobRequestConditions()
private BlobRequestConditions extractRequestConditionsFromRequest(HttpHeaders requestHeaders) { return new BlobRequestConditions() .setLeaseId(requestHeaders.getValue("x-ms-lease-id")) .setIfUnmodifiedSince(requestHeaders.getValue("If-Unmodified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Unmodified-Since")).getDateTime()) .setIfNoneMatch(requestHeaders.getValue("If-None-Match")) .setIfMatch(requestHeaders.getValue("If-Match")) .setIfModifiedSince(requestHeaders.getValue("If-Modified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Modified-Since")).getDateTime()); }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
minor nit: for readability we should turn `Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY` into a constant `EncryptionConstants.ENCRYPTION_META_DATA_KEY` (doesn't have to be that class) or make it into a utility API somewhere. This will help centralize all uses of it and help prevent a case where we may accidentally do `Constants.HeaderConstants.X_MS_META + ENCRYPTION_DATA_KEY` and drop the needed `-`
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
Does this end up skipping the rest of the `HttpPipeline` when data isn't null?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
return data == null ? next.process() : Mono.just(data)
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
To me, it looks like most these calls are operating synchronously, do you think it'd be easier to read and debug if calling into async code is held to the end? For example creating the `Tuple<EncryptionData, EncryptedBlobRange>` is just pulling values from headers, could that first flatMap be removed?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
}
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
it's not clear to me why checking `!suppressed` ? in the scenario: `CLIENT` -> `CLIENT` -> `CLIENT`, would this lead to the second `CLIENT` span getting suppressed, but then the third `CLIENT` not getting suppressed?
private static boolean shouldSuppress(SpanKind kind, Context context) { if (isClientCall(kind)) { boolean suppress = getBoolean(CLIENT_METHOD_CALL_FLAG, context); boolean suppressed = getBoolean(SUPPRESSED_SPAN_FLAG, context); return suppress && !suppressed; } return false; }
return suppress && !suppressed;
private static boolean shouldSuppress(SpanKind kind, Context context) { return isClientCall(kind) && getBoolean(CLIENT_METHOD_CALL_FLAG, context); }
class object */ @SuppressWarnings("unchecked") private static <T> T getOrNull(Context context, String key, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { LOGGER.verbose("Could not extract key '{}' of type '{}' from context.", key, clazz); return null; }); return (T) result; }
class object */ @SuppressWarnings("unchecked") private static <T> T getOrNull(Context context, String key, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { LOGGER.verbose("Could not extract key '{}' of type '{}' from context.", key, clazz); return null; }); return (T) result; }
```suggestion outer = openTelemetryTracer.start("outer", outer, ProcessKind.SEND); ```
public void suppressNestedInterleavedClientSpan() { Context outer = openTelemetryTracer.getSharedSpanBuilder("outer", Context.NONE); openTelemetryTracer.addLink(outer.addData(SPAN_CONTEXT_KEY, TEST_CONTEXT)); outer = openTelemetryTracer.start("innerSuppressed", outer, ProcessKind.SEND); Context inner1Suppressed = openTelemetryTracer.start("innerSuppressed", outer); Context inner1NotSuppressed = openTelemetryTracer.start("innerNotSuppressed", new StartSpanOptions(com.azure.core.util.tracing.SpanKind.PRODUCER), inner1Suppressed); Context inner2Suppressed = openTelemetryTracer.start("innerSuppressed", inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner2Suppressed); assertEquals(0, testExporter.getSpans().size()); openTelemetryTracer.end("ok", null, inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner1Suppressed); openTelemetryTracer.end("ok", null, outer); assertEquals(2, testExporter.getSpans().size()); SpanData innerNotSuppressedSpan = testExporter.getSpans().get(0); SpanData outerSpan = testExporter.getSpans().get(1); assertEquals(innerNotSuppressedSpan.getSpanContext().getTraceId(), outerSpan.getSpanContext().getTraceId()); assertEquals(innerNotSuppressedSpan.getParentSpanId(), outerSpan.getSpanContext().getSpanId()); }
outer = openTelemetryTracer.start("innerSuppressed", outer, ProcessKind.SEND);
public void suppressNestedInterleavedClientSpan() { Context outer = openTelemetryTracer.getSharedSpanBuilder("outer", Context.NONE); openTelemetryTracer.addLink(outer.addData(SPAN_CONTEXT_KEY, TEST_CONTEXT)); outer = openTelemetryTracer.start("outer", outer, ProcessKind.SEND); Context inner1Suppressed = openTelemetryTracer.start("innerSuppressed", outer); Context inner1NotSuppressed = openTelemetryTracer.start("innerNotSuppressed", new StartSpanOptions(com.azure.core.util.tracing.SpanKind.PRODUCER), inner1Suppressed); Context inner2Suppressed = openTelemetryTracer.start("innerSuppressed", inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner2Suppressed); assertEquals(0, testExporter.getFinishedSpanItems().size()); openTelemetryTracer.end("ok", null, inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner1Suppressed); openTelemetryTracer.end("ok", null, outer); assertEquals(2, testExporter.getFinishedSpanItems().size()); SpanData innerNotSuppressedSpan = testExporter.getFinishedSpanItems().get(0); SpanData outerSpan = testExporter.getFinishedSpanItems().get(1); assertEquals(innerNotSuppressedSpan.getSpanContext().getTraceId(), outerSpan.getSpanContext().getTraceId()); assertEquals(innerNotSuppressedSpan.getParentSpanId(), outerSpan.getSpanContext().getSpanId()); }
class TestScope implements Scope { private boolean closed = false; @Override public void close() { closed = true; } public boolean isClosed() { return this.closed; } }
class TestScope implements Scope { private boolean closed = false; @Override public void close() { closed = true; } public boolean isClosed() { return this.closed; } }
manual code here
public void certificateCRUD() { ResourceManager resourceManager = createResourceManager(); IotDpsManager iotDpsManager = createIotDpsManager(); ResourceGroup resourceGroup = createResourceGroup(resourceManager); try { ProvisioningServiceDescriptionInner provisioningServiceDescription = createProvisioningService(iotDpsManager, resourceGroup); CertificateResponseInner certificateInner = new CertificateResponseInner() .withProperties(new CertificateProperties() .withCertificate(Constants.Certificate.CONTENT.getBytes(StandardCharsets.UTF_8))); iotDpsManager .serviceClient() .getDpsCertificates() .createOrUpdate( resourceGroup.name(), provisioningServiceDescription.name(), Constants.Certificate.NAME, certificateInner); CertificateListDescriptionInner certificateListDescription = iotDpsManager .serviceClient() .getDpsCertificates() .list( resourceGroup.name(), provisioningServiceDescription.name()); assertEquals(1, certificateListDescription.value().size()); CertificateResponseInner certificate = certificateListDescription.value().get(0); assertFalse(certificate.properties().isVerified()); assertEquals(Constants.Certificate.SUBJECT, certificate.properties().subject()); assertEquals(Constants.Certificate.THUMBPRINT, certificate.properties().thumbprint()); VerificationCodeResponseInner verificationCodeResponse = iotDpsManager .serviceClient() .getDpsCertificates() .generateVerificationCode( certificate.name(), certificate.etag(), resourceGroup.name(), provisioningServiceDescription.name()); assertNotNull(verificationCodeResponse.properties().verificationCode()); iotDpsManager .serviceClient() .getDpsCertificates() .delete( resourceGroup.name(), verificationCodeResponse.etag(), provisioningServiceDescription.name(), certificate.name()); certificateListDescription = iotDpsManager .serviceClient() .getDpsCertificates() .list( resourceGroup.name(), provisioningServiceDescription.name()); assertEquals(0, certificateListDescription.value().size()); } finally { 
deleteResourceGroup(resourceManager, resourceGroup); } }
CertificateResponseInner certificateInner = new CertificateResponseInner()
public void certificateCRUD() { ResourceManager resourceManager = createResourceManager(); IotDpsManager iotDpsManager = createIotDpsManager(); ResourceGroup resourceGroup = createResourceGroup(resourceManager); try { ProvisioningServiceDescriptionInner provisioningServiceDescription = createProvisioningService(iotDpsManager, resourceGroup); CertificateResponseInner certificateInner = new CertificateResponseInner() .withProperties(new CertificateProperties() .withCertificate(Constants.Certificate.CONTENT.getBytes(StandardCharsets.UTF_8))); iotDpsManager .serviceClient() .getDpsCertificates() .createOrUpdate( resourceGroup.name(), provisioningServiceDescription.name(), Constants.Certificate.NAME, certificateInner); CertificateListDescriptionInner certificateListDescription = iotDpsManager .serviceClient() .getDpsCertificates() .list( resourceGroup.name(), provisioningServiceDescription.name()); assertEquals(1, certificateListDescription.value().size()); CertificateResponseInner certificate = certificateListDescription.value().get(0); assertFalse(certificate.properties().isVerified()); assertEquals(Constants.Certificate.SUBJECT, certificate.properties().subject()); assertEquals(Constants.Certificate.THUMBPRINT, certificate.properties().thumbprint()); VerificationCodeResponseInner verificationCodeResponse = iotDpsManager .serviceClient() .getDpsCertificates() .generateVerificationCode( certificate.name(), certificate.etag(), resourceGroup.name(), provisioningServiceDescription.name()); assertNotNull(verificationCodeResponse.properties().verificationCode()); iotDpsManager .serviceClient() .getDpsCertificates() .delete( resourceGroup.name(), verificationCodeResponse.etag(), provisioningServiceDescription.name(), certificate.name()); certificateListDescription = iotDpsManager .serviceClient() .getDpsCertificates() .list( resourceGroup.name(), provisioningServiceDescription.name()); assertEquals(0, certificateListDescription.value().size()); } finally { 
deleteResourceGroup(resourceManager, resourceGroup); } }
class CertificatesTests extends DeviceProvisioningTestBase { @Test @DoNotRecord(skipInPlayback = true) }
class CertificatesTests extends DeviceProvisioningTestBase { @Test @DoNotRecord(skipInPlayback = true) }
should we just create static logger in restproxyutil?
public Object invoke(Object proxy, final Method method, Object[] args) { RestProxyUtil.validateResumeOperationIsNotPresent(method, LOGGER); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (options != null) { options.getRequestCallback().accept(request); } if (request.getBody() != null) { request.setBody(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } }
RestProxyUtil.validateResumeOperationIsNotPresent(method, LOGGER);
public Object invoke(Object proxy, final Method method, Object[] args) { RestProxyUtils.validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = RestProxyUtils.mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (options != null) { options.getRequestCallback().accept(request); } Context finalContext = context; final Mono<HttpResponse> asyncResponse = RestProxyUtils.validateLengthAsync(request) .flatMap(r -> send(r, finalContext)); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } }
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". 
* * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. * * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. 
*/ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. * * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = 
methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, 
SerializerEncoding.fromHeaders(request.getHeaders()), stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && options.getErrorOptions().contains(ErrorOptions.NO_THROW))) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { if (entityType.equals(StreamResponse.class)) { return createResponse(response, entityType, null); } final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { 
return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); final HttpResponse httpResponse = response.getSourceResponse(); final HttpRequest request = httpResponse.getRequest(); final int statusCode = httpResponse.getStatusCode(); final HttpHeaders headers = httpResponse.getHeaders(); final Object decodedHeaders = response.getDecodedHeaders(); if (cls.equals(Response.class)) { return Mono.defer(() -> Mono.just(cls.cast(new ResponseBase<>(request, statusCode, headers, bodyAsObject, decodedHeaders)))); } else if (cls.equals(PagedResponse.class)) { return Mono.create(sink -> { if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { sink.error(LOGGER.logExceptionAsError(new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR))); } else if (bodyAsObject == null) { sink.success(cls.cast(new PagedResponseBase<>(request, statusCode, headers, null, null, decodedHeaders))); } else { sink.success(cls.cast(new PagedResponseBase<>(request, statusCode, headers, (Page<?>) bodyAsObject, decodedHeaders))); } }); } else if (cls.equals(StreamResponse.class)) { return Mono.just(new StreamResponse(request, httpResponse)); } return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(cls)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + cls)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = 
methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { if (methodParser.getReturnType().equals(StreamResponse.class)) { asyncResult = Mono.empty(); } else { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = 
httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. 
* @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
class RestProxy implements InvocationHandler { private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if 
(bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { request.setBody(serializer.serializeToBytes(bodyContentObject, SerializerEncoding.JSON)); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { request.setBody(serializer.serializeToBytes(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()))); } } return request; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param asyncDecodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @param options Additional options passed as part of the request. * @return An async-version of the provided decodedResponse. 
*/ private static Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse.flatMap(decodedResponse -> { int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && options.getErrorOptions().contains(ErrorOptions.NO_THROW))) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .map(bytes -> RestProxyUtils.instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), bytes, decodedResponse.getDecodedBody(bytes))) .switchIfEmpty(Mono.fromSupplier(() -> RestProxyUtils.instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null))) .flatMap(Mono::error); }); } private static Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { if (entityType.equals(StreamResponse.class)) { return Mono.fromCallable(() -> createResponse(response, entityType, null)); } final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(Mono.fromCallable(() -> createResponse(response, entityType, null))); } else { return handleBodyReturnType(response, methodParser, bodyType) .map(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.fromCallable(() -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings({"unchecked", "rawtypes"}) private static Response 
createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); final HttpResponse httpResponse = response.getSourceResponse(); final HttpRequest request = httpResponse.getRequest(); final int statusCode = httpResponse.getStatusCode(); final HttpHeaders headers = httpResponse.getHeaders(); final Object decodedHeaders = response.getDecodedHeaders(); if (cls.equals(Response.class)) { return cls.cast(new ResponseBase<>(request, statusCode, headers, bodyAsObject, decodedHeaders)); } else if (cls.equals(PagedResponse.class)) { if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw LOGGER.logExceptionAsError(new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } else if (bodyAsObject == null) { return cls.cast(new PagedResponseBase<>(request, statusCode, headers, null, null, decodedHeaders)); } else { return cls.cast(new PagedResponseBase<>(request, statusCode, headers, (Page<?>) bodyAsObject, decodedHeaders)); } } else if (cls.equals(StreamResponse.class)) { return new StreamResponse(request, httpResponse); } MethodHandle constructorHandle = RESPONSE_CONSTRUCTORS_CACHE.get(cls); return RESPONSE_CONSTRUCTORS_CACHE.invoke(constructorHandle, response, bodyAsObject); } private static Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if 
(TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { if (methodParser.getReturnType().equals(StreamResponse.class)) { asyncResult = Mono.empty(); } else { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } } else { asyncResult = response.getSourceResponse().getBodyAsByteArray().mapNotNull(response::getDecodedBody); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = 
httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. 
* @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
```suggestion throw new FileNotFoundException("Share or file does not exist."); ```
/**
 * Opens an {@link InputStream} for reading the content of the Azure file share file.
 * <p>
 * Storage "not found" errors are translated to the conventional {@link FileNotFoundException}
 * so callers can distinguish absence of the resource from other I/O failures.
 *
 * @return an input stream over the remote file's content.
 * @throws FileNotFoundException if the share or the file does not exist.
 * @throws IOException if the input stream cannot be opened for any other reason.
 */
public InputStream getInputStream() throws IOException {
    try {
        return this.shareFileClient.openInputStream();
    } catch (ShareStorageException e) {
        // Both "share missing" and "file missing" error codes mean the resource is absent.
        if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
            || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
            // Fix: grammatical error message ("Share or file not existed").
            throw new FileNotFoundException("Share or file does not exist.");
        } else {
            // NOTE(review): reuses MSG_FAIL_OPEN_OUTPUT ("output stream" wording) for an
            // input-stream failure — confirm whether a dedicated input message is intended.
            throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
        }
    }
}
throw new FileNotFoundException("Share or file does not exist.");
/**
 * Opens an {@link InputStream} for reading the content of the Azure file share file.
 * <p>
 * Storage "not found" errors are translated to the conventional {@link FileNotFoundException}
 * so callers can distinguish absence of the resource from other I/O failures.
 *
 * @return an input stream over the remote file's content.
 * @throws FileNotFoundException if the share or the file does not exist.
 * @throws IOException if the input stream cannot be opened for any other reason.
 */
public InputStream getInputStream() throws IOException {
    try {
        return this.shareFileClient.openInputStream();
    } catch (ShareStorageException e) {
        // Both "share missing" and "file missing" error codes mean the resource is absent.
        if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
            || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
            // Consistency fix: add the trailing period so the message matches the accepted
            // review suggestion and the blob-resource counterpart ("... does not exist.").
            throw new FileNotFoundException("Share or file does not exist.");
        } else {
            // NOTE(review): reuses MSG_FAIL_OPEN_OUTPUT ("output stream" wording) for an
            // input-stream failure — confirm whether a dedicated input message is intended.
            throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
        }
    }
}
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override @Override StorageType getStorageType() { return StorageType.FILE; } private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override @Override StorageType getStorageType() { return StorageType.FILE; } private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } } }
```suggestion throw new FileNotFoundException("Blob or container does not exist."); ```
public InputStream getInputStream() throws IOException { try { return this.blockBlobClient.openInputStream(); } catch (BlobStorageException e) { if (e.getErrorCode() == BlobErrorCode.CONTAINER_NOT_FOUND || e.getErrorCode() == BlobErrorCode.BLOB_NOT_FOUND) { throw new FileNotFoundException("Blob or container not existed."); } else { throw new IOException(MSG_FAIL_OPEN_INPUT, e); } } }
throw new FileNotFoundException("Blob or container not existed.");
public InputStream getInputStream() throws IOException { try { return this.blockBlobClient.openInputStream(); } catch (BlobStorageException e) { if (e.getErrorCode() == BlobErrorCode.CONTAINER_NOT_FOUND || e.getErrorCode() == BlobErrorCode.BLOB_NOT_FOUND) { throw new FileNotFoundException("Blob or container does not exist."); } else { throw new IOException(MSG_FAIL_OPEN_INPUT, e); } } }
class StorageBlobResource extends AzureStorageResource { private static final Logger LOGGER = LoggerFactory.getLogger(StorageBlobResource.class); private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of cloud blob"; private static final String MSG_FAIL_OPEN_INPUT = "Failed to open input stream of blob"; private final BlobServiceClient blobServiceClient; private final String location; private final BlobContainerClient blobContainerClient; private final BlockBlobClient blockBlobClient; private final boolean autoCreateFiles; private BlobProperties blobProperties; private final String snapshot; private final String versionId; private final String contentType; /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location) { this(blobServiceClient, location, true); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles) { this(blobServiceClient, location, autoCreateFiles, null, null, null); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param snapshot the snapshot name * @param versionId the version id * @param contentType the content type */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles, String snapshot, String versionId, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles == null ? 
isAutoCreateFiles(location) : autoCreateFiles; this.blobServiceClient = blobServiceClient; this.location = location; this.snapshot = snapshot; this.versionId = versionId; this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); Assert.isTrue(!(StringUtils.hasText(versionId) && StringUtils.hasText(snapshot)), "'versionId' and 'snapshot' can not be both set"); this.blobContainerClient = blobServiceClient.getBlobContainerClient(getContainerName(location)); BlobClient blobClient = blobContainerClient.getBlobClient(getFilename(location)); if (StringUtils.hasText(versionId)) { blobClient = blobClient.getVersionClient(versionId); } if (StringUtils.hasText(snapshot)) { blobClient = blobClient.getSnapshotClient(snapshot); } this.blockBlobClient = blobClient.getBlockBlobClient(); } private boolean isAutoCreateFiles(String location) { return true; } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws IOException If a storage service error occurred or blob not found. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.blobContainerClient.createIfNotExists(); } BlockBlobOutputStreamOptions options = new BlockBlobOutputStreamOptions(); if (StringUtils.hasText(contentType)) { BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders(); blobHttpHeaders.setContentType(contentType); options.setHeaders(blobHttpHeaders); } return this.blockBlobClient.getBlobOutputStream(options); } catch (BlobStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return true if the blob exists, false if it doesn't */ @Override public boolean exists() { return blockBlobClient.exists(); } /** * Gets the URL of the blob represented by this client. 
* * @return the URL. */ @Override public URL getURL() throws IOException { return new URL(this.blockBlobClient.getBlobUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } private BlobProperties getBlobProperties() { if (blobProperties == null) { blobProperties = blockBlobClient.getProperties(); } return blobProperties; } /** * @return the size of the blob in bytes */ @Override public long contentLength() { return getBlobProperties().getBlobSize(); } /** * @return the time when the blob was last modified */ @Override public long lastModified() { return getBlobProperties().getLastModified().toEpochSecond(); } @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageBlobResource(this.blobServiceClient, newLocation, autoCreateFiles); } /** * @return The decoded name of the blob. */ @Override public String getFilename() { return this.blockBlobClient.getBlobName(); } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { StringBuilder sb = new StringBuilder(); sb.append("Azure storage account blob resource [container='"); sb.append(this.blockBlobClient.getContainerName()); sb.append("', blob='"); sb.append(blockBlobClient.getBlobName()); sb.append("'"); if (versionId != null) { sb.append(", versionId='").append(versionId).append("'"); } if (snapshot != null) { sb.append(", snapshot='").append(snapshot).append("'"); } sb.append("]"); return sb.toString(); } @Override @Override StorageType getStorageType() { return StorageType.BLOB; } }
class StorageBlobResource extends AzureStorageResource { private static final Logger LOGGER = LoggerFactory.getLogger(StorageBlobResource.class); private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of cloud blob"; private static final String MSG_FAIL_OPEN_INPUT = "Failed to open input stream of blob"; private final BlobServiceClient blobServiceClient; private final String location; private final BlobContainerClient blobContainerClient; private final BlockBlobClient blockBlobClient; private final boolean autoCreateFiles; private BlobProperties blobProperties; private final String snapshot; private final String versionId; private final String contentType; /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location) { this(blobServiceClient, location, true); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles) { this(blobServiceClient, location, autoCreateFiles, null, null, null); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param snapshot the snapshot name * @param versionId the version id * @param contentType the content type */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles, String snapshot, String versionId, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles == null ? 
isAutoCreateFiles(location) : autoCreateFiles; this.blobServiceClient = blobServiceClient; this.location = location; this.snapshot = snapshot; this.versionId = versionId; this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); Assert.isTrue(!(StringUtils.hasText(versionId) && StringUtils.hasText(snapshot)), "'versionId' and 'snapshot' can not be both set"); this.blobContainerClient = blobServiceClient.getBlobContainerClient(getContainerName(location)); BlobClient blobClient = blobContainerClient.getBlobClient(getFilename(location)); if (StringUtils.hasText(versionId)) { blobClient = blobClient.getVersionClient(versionId); } if (StringUtils.hasText(snapshot)) { blobClient = blobClient.getSnapshotClient(snapshot); } this.blockBlobClient = blobClient.getBlockBlobClient(); } private boolean isAutoCreateFiles(String location) { return true; } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws IOException If a storage service error occurred or blob not found. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.blobContainerClient.createIfNotExists(); } BlockBlobOutputStreamOptions options = new BlockBlobOutputStreamOptions(); if (StringUtils.hasText(contentType)) { BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders(); blobHttpHeaders.setContentType(contentType); options.setHeaders(blobHttpHeaders); } return this.blockBlobClient.getBlobOutputStream(options); } catch (BlobStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return true if the blob exists, false if it doesn't */ @Override public boolean exists() { return blockBlobClient.exists(); } /** * Gets the URL of the blob represented by this client. 
* * @return the URL. */ @Override public URL getURL() throws IOException { return new URL(this.blockBlobClient.getBlobUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } private BlobProperties getBlobProperties() { if (blobProperties == null) { blobProperties = blockBlobClient.getProperties(); } return blobProperties; } /** * @return the size of the blob in bytes */ @Override public long contentLength() { return getBlobProperties().getBlobSize(); } /** * @return the time when the blob was last modified */ @Override public long lastModified() { return getBlobProperties().getLastModified().toEpochSecond(); } @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageBlobResource(this.blobServiceClient, newLocation, autoCreateFiles); } /** * @return The decoded name of the blob. */ @Override public String getFilename() { return this.blockBlobClient.getBlobName(); } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { StringBuilder sb = new StringBuilder(); sb.append("Azure storage account blob resource [container='"); sb.append(this.blockBlobClient.getContainerName()); sb.append("', blob='"); sb.append(blockBlobClient.getBlobName()); sb.append("'"); if (versionId != null) { sb.append(", versionId='").append(versionId).append("'"); } if (snapshot != null) { sb.append(", snapshot='").append(snapshot).append("'"); } sb.append("]"); return sb.toString(); } @Override @Override StorageType getStorageType() { return StorageType.BLOB; } }
If the application has no activity by it's own accord, would that translate into unhealthy report?
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
Is write not applicable for idleness?
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
Are the reads and writes the end user workloads? If so for write-only/heavy below condition might match right?
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
Does this need to include the try count as that is inferred by attempting more than the maximum limit
private boolean isValidRedirectCount(int tryCount) { if (tryCount >= getMaxAttempts()) { LOGGER.atError() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue("maxAttempts", getMaxAttempts()) .log("Redirect attempts have been exhausted."); return false; } return true; }
.addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
private boolean isValidRedirectCount(int tryCount) { if (tryCount >= getMaxAttempts()) { LOGGER.atError() .addKeyValue("maxAttempts", getMaxAttempts()) .log("Redirect attempts have been exhausted."); return false; } return true; }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. 
*/ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name to look up the value for the redirect url in response headers. 
* * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url was null, request redirect was terminated."); return null; } else { return headerValue; } } }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private static final String REDIRECT_URLS_KEY = "redirectUrls"; private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. 
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(REDIRECT_URLS_KEY, () -> attemptedRedirectUrls.toString()) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name 
to look up the value for the redirect url in response headers. * * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url header was null, request redirect was terminated."); return null; } else { return headerValue; } } }
```suggestion .log("Redirect url header was null, request redirect was terminated."); ``` Thoughts?
String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url was null, request redirect was terminated."); return null; } else { return headerValue; } }
.log("Redirect url was null, request redirect was terminated.");
String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url header was null, request redirect was terminated."); return null; } else { return headerValue; } }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. 
*/ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name to look up the value for the redirect url in response headers. 
* * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ private boolean isValidRedirectCount(int tryCount) { if (tryCount >= getMaxAttempts()) { LOGGER.atError() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue("maxAttempts", getMaxAttempts()) .log("Redirect attempts have been exhausted."); return false; } return true; } /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private static final String REDIRECT_URLS_KEY = "redirectUrls"; private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. 
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(REDIRECT_URLS_KEY, () -> attemptedRedirectUrls.toString()) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name 
to look up the value for the redirect url in response headers. * * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ private boolean isValidRedirectCount(int tryCount) { if (tryCount >= getMaxAttempts()) { LOGGER.atError() .addKeyValue("maxAttempts", getMaxAttempts()) .log("Redirect attempts have been exhausted."); return false; } return true; } /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ }
should we use != null here to match the condition below?
private ProxyOptions getProxyOptions() { String config = Configs.getClientTelemetryProxyOptionsConfig(); if (StringUtils.isNotEmpty(config)) { try { JsonProxyOptionsConfig proxyOptionsConfig = Utils.getSimpleObjectMapper().readValue(config, JsonProxyOptionsConfig.class); ProxyOptions.Type type = ProxyOptions.Type.valueOf(proxyOptionsConfig.type); if (type != ProxyOptions.Type.HTTP) { throw new IllegalArgumentException("Only http proxy type is supported."); } if (logger.isDebugEnabled()) { logger.debug( "Enable proxy with type {}, host {}, port {}, userName {}, password length {}", type, proxyOptionsConfig.host, proxyOptionsConfig.port, proxyOptionsConfig.username, proxyOptionsConfig.password != null ? proxyOptionsConfig.password.length() : -1 ); } ProxyOptions proxyOptions = new ProxyOptions( type, new InetSocketAddress(proxyOptionsConfig.host, proxyOptionsConfig.port)); if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) || !Strings.isNullOrEmpty(proxyOptionsConfig.password)) { proxyOptions.setCredentials( proxyOptionsConfig.username != null ? proxyOptionsConfig.username : "", proxyOptionsConfig.password != null ? proxyOptionsConfig.password : ""); } return proxyOptions; } catch (JsonProcessingException e) { logger.error("Failed to parse client telemetry proxy option config", e); } } return null; }
if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) ||
private ProxyOptions getProxyOptions() { String config = Configs.getClientTelemetryProxyOptionsConfig(); if (StringUtils.isNotEmpty(config)) { try { JsonProxyOptionsConfig proxyOptionsConfig = Utils.getSimpleObjectMapper().readValue(config, JsonProxyOptionsConfig.class); ProxyOptions.Type type = ProxyOptions.Type.valueOf(proxyOptionsConfig.type); if (type != ProxyOptions.Type.HTTP) { throw new IllegalArgumentException("Only http proxy type is supported."); } if (logger.isDebugEnabled()) { logger.debug( "Enable proxy with type {}, host {}, port {}, userName {}, password length {}", type, proxyOptionsConfig.host, proxyOptionsConfig.port, proxyOptionsConfig.username, proxyOptionsConfig.password != null ? proxyOptionsConfig.password.length() : -1 ); } ProxyOptions proxyOptions = new ProxyOptions( type, new InetSocketAddress(proxyOptionsConfig.host, proxyOptionsConfig.port)); if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) || !Strings.isNullOrEmpty(proxyOptionsConfig.password)) { proxyOptions.setCredentials( proxyOptionsConfig.username != null ? proxyOptionsConfig.username : "", proxyOptionsConfig.password != null ? proxyOptionsConfig.password : ""); } return proxyOptions; } catch (JsonProcessingException e) { logger.error("Failed to parse client telemetry proxy option config", e); } } return null; }
class ClientTelemetryConfig { private static Logger logger = LoggerFactory.getLogger(ClientTelemetryConfig.class); private static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; private boolean clientTelemetryEnabled; private final Duration httpNetworkRequestTimeout; private final int maxConnectionPoolSize; private final Duration idleHttpConnectionTimeout; private final ProxyOptions proxy; public ClientTelemetryConfig() { this.clientTelemetryEnabled = DEFAULT_CLIENT_TELEMETRY_ENABLED; this.httpNetworkRequestTimeout = DEFAULT_NETWORK_REQUEST_TIMEOUT; this.maxConnectionPoolSize = DEFAULT_MAX_CONNECTION_POOL_SIZE; this.idleHttpConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT; this.proxy = this.getProxyOptions(); } public static ClientTelemetryConfig getDefaultConfig() { return new ClientTelemetryConfig(); } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public boolean isClientTelemetryEnabled() { return this.clientTelemetryEnabled; } public Duration getHttpNetworkRequestTimeout() { return this.httpNetworkRequestTimeout; } public int getMaxConnectionPoolSize() { return this.maxConnectionPoolSize; } public Duration getIdleHttpConnectionTimeout() { return this.idleHttpConnectionTimeout; } public ProxyOptions getProxy() { return this.proxy; } private static class JsonProxyOptionsConfig { @JsonProperty private String host; @JsonProperty private int port; @JsonProperty private String type; @JsonProperty private String username; @JsonProperty private String password; private JsonProxyOptionsConfig() {} private JsonProxyOptionsConfig(String host, int port, String type, String username, String password) { this.host = host; this.port = port; this.type = 
type; this.username = username; this.password = password; } } }
class ClientTelemetryConfig { private static Logger logger = LoggerFactory.getLogger(ClientTelemetryConfig.class); private static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; private boolean clientTelemetryEnabled; private final Duration httpNetworkRequestTimeout; private final int maxConnectionPoolSize; private final Duration idleHttpConnectionTimeout; private final ProxyOptions proxy; public ClientTelemetryConfig() { this.clientTelemetryEnabled = DEFAULT_CLIENT_TELEMETRY_ENABLED; this.httpNetworkRequestTimeout = DEFAULT_NETWORK_REQUEST_TIMEOUT; this.maxConnectionPoolSize = DEFAULT_MAX_CONNECTION_POOL_SIZE; this.idleHttpConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT; this.proxy = this.getProxyOptions(); } public static ClientTelemetryConfig getDefaultConfig() { return new ClientTelemetryConfig(); } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public boolean isClientTelemetryEnabled() { return this.clientTelemetryEnabled; } public Duration getHttpNetworkRequestTimeout() { return this.httpNetworkRequestTimeout; } public int getMaxConnectionPoolSize() { return this.maxConnectionPoolSize; } public Duration getIdleHttpConnectionTimeout() { return this.idleHttpConnectionTimeout; } public ProxyOptions getProxy() { return this.proxy; } private static class JsonProxyOptionsConfig { @JsonProperty private String host; @JsonProperty private int port; @JsonProperty private String type; @JsonProperty private String username; @JsonProperty private String password; private JsonProxyOptionsConfig() {} private JsonProxyOptionsConfig(String host, int port, String type, String username, String password) { this.host = host; this.port = port; this.type = 
type; this.username = username; this.password = password; } } }
I believe it should be `distinctUntilChanged()`. `distinct` keeps a set, so if you see INACTIVE -> ACTIVE -> INACTIVE states, it'll only output INACTIVE and ACTIVE but not the last state.
public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates .distinct() .takeUntilOther(this.terminateEndpointStates.asMono()); }
.distinct()
public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates .distinctUntilChanged() .takeUntilOther(this.terminateEndpointStates.asMono()); }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandler handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandler handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); this.logger = new ClientLogger(ReactorReceiver.class, loggingContext); this.messagesProcessor = this.handler.getDeliveredMessages() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); 
if (credits != null && credits > 0) { logger.atInfo() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Authorization completed."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an 
exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * Ref:https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } /** * Beings the client side close by initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. 
*/ private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; return Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); } /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. 
* </p> */ private void terminateEndpointState() { this.terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. * </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); receiver.free(); } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandler handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandler handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorReceiver.class, loggingContext); this.messagesProcessor = this.handler.getDeliveredMessages() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = 
creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atInfo() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an 
exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } /** * Beings the client side close by initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. 
*/ private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; return Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); } /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. 
* </p> */ private void terminateEndpointState() { this.terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. * </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); receiver.free(); } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
Is there a way to make `1024` configurable? This is a pretty small number for an average application.
private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } }
.getValue();
private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override public InputStream getInputStream() throws IOException { try { return this.shareFileClient.openInputStream(); } catch (ShareStorageException e) { if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) { throw new FileNotFoundException("Share or file not existed"); } else { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } } @Override StorageType getStorageType() { return StorageType.FILE; } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override public InputStream getInputStream() throws IOException { try { return this.shareFileClient.openInputStream(); } catch (ShareStorageException e) { if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) { throw new FileNotFoundException("Share or file does not exist"); } else { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } } @Override StorageType getStorageType() { return StorageType.FILE; } }
@rickle-msft @ibrahimrabab Should we introduce `shareFileClient.createIfNotExists(size)`?
private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } }
if (!this.exists()) {
private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override public InputStream getInputStream() throws IOException { try { return this.shareFileClient.openInputStream(); } catch (ShareStorageException e) { if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) { throw new FileNotFoundException("Share or file not existed"); } else { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } } @Override StorageType getStorageType() { return StorageType.FILE; } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override public InputStream getInputStream() throws IOException { try { return this.shareFileClient.openInputStream(); } catch (ShareStorageException e) { if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) { throw new FileNotFoundException("Share or file does not exist"); } else { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } } @Override StorageType getStorageType() { return StorageType.FILE; } }
indent
public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { if (this.healthChecker instanceof RntbdClientChannelHealthChecker) { ((RntbdClientChannelHealthChecker) this.healthChecker) .isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> { final Throwable cause; if (future.isSuccess()) { if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) { return; } cause = new UnhealthyChannelException(future.get()); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } else { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = new UnhealthyChannelException( MessageFormat.format( "Custom ChannelHealthChecker {0} failed.", this.healthChecker.getClass().getSimpleName())); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } if (event instanceof SslHandshakeCompletionEvent) { SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event; if (!sslHandshakeCompletionEvent.isSuccess()) { if (logger.isDebugEnabled()) { logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause()); } this.exceptionCaught(context, sslHandshakeCompletionEvent.cause()); return; } else { logger.info("adding idleStateHandler"); context.pipeline().addFirst( new IdleStateHandler( this.idleConnectionTimerResolutionInNanos, this.idleConnectionTimerResolutionInNanos, 0, 
TimeUnit.NANOSECONDS)); } } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } }
new IdleStateHandler(
public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { if (this.healthChecker instanceof RntbdClientChannelHealthChecker) { ((RntbdClientChannelHealthChecker) this.healthChecker) .isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> { final Throwable cause; if (future.isSuccess()) { if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) { return; } cause = new UnhealthyChannelException(future.get()); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } else { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = new UnhealthyChannelException( MessageFormat.format( "Custom ChannelHealthChecker {0} failed.", this.healthChecker.getClass().getSimpleName())); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); this.exceptionCaught(context, (RntbdContextException)event); return; } if (event instanceof SslHandshakeCompletionEvent) { SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event; if (sslHandshakeCompletionEvent.isSuccess()) { if (logger.isDebugEnabled()) { logger.debug("SslHandshake completed, adding idleStateHandler"); } context.pipeline().addAfter( SslHandler.class.toString(), IdleStateHandler.class.toString(), new IdleStateHandler( this.idleConnectionTimerResolutionInNanos, this.idleConnectionTimerResolutionInNanos, 0, TimeUnit.NANOSECONDS)); } else { if 
(logger.isDebugEnabled()) { logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause()); } this.exceptionCaught(context, sslHandshakeCompletionEvent.cause()); return; } } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private final RntbdConnectionStateListener rntbdConnectionStateListener; private final long idleConnectionTimerResolutionInNanos; private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager( final ChannelHealthChecker healthChecker, final int pendingRequestLimit, final RntbdConnectionStateListener connectionStateListener, final long idleConnectionTimerResolutionInNanos) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; 
this.rntbdConnectionStateListener = connectionStateListener; this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); if (logger.isDebugEnabled()) { logger.debug("{} closing due to:", context, cause); } context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } RntbdChannelState getChannelState(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); if (this.pendingRequests.size() < limit) { return RntbdChannelState.ok(this.pendingRequests.size()); } if (this.hasRntbdContext()) { return RntbdChannelState.pendingLimit(this.pendingRequests.size()); } else { return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size())); } } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.rntbdConnectionStateListener != null) { this.rntbdConnectionStateListener.onException(throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if 
(this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. 
* @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddress() != null ? 
requestRecord.args().physicalAddress().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, 
responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... 
args) { logger.trace("{}\n{}\n{}", operationName, context, args); } final static class UnhealthyChannelException extends ChannelException { UnhealthyChannelException(String reason) { super("health check failed, reason: " + reason); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private final RntbdConnectionStateListener rntbdConnectionStateListener; private final long idleConnectionTimerResolutionInNanos; private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager( final ChannelHealthChecker healthChecker, final int pendingRequestLimit, final RntbdConnectionStateListener connectionStateListener, final long idleConnectionTimerResolutionInNanos) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; 
this.rntbdConnectionStateListener = connectionStateListener; this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); if (logger.isDebugEnabled()) { logger.debug("{} closing due to:", context, cause); } context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } RntbdChannelState getChannelState(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); if (this.pendingRequests.size() < limit) { return RntbdChannelState.ok(this.pendingRequests.size()); } if (this.hasRntbdContext()) { return RntbdChannelState.pendingLimit(this.pendingRequests.size()); } else { return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size())); } } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.rntbdConnectionStateListener != null) { this.rntbdConnectionStateListener.onException(throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if 
(this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. 
* @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddress() != null ? 
requestRecord.args().physicalAddress().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, 
responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... 
args) { logger.trace("{}\n{}\n{}", operationName, context, args); } final static class UnhealthyChannelException extends ChannelException { UnhealthyChannelException(String reason) { super("health check failed, reason: " + reason); } @Override public Throwable fillInStackTrace() { return this; } } }
The common usage on sync API is to create `DeviceManagementClient` via `.buildClient()`. And then API call does not need to have `.block()` or `.toIterable()` to convert async response to sync response.
public static void main(String[] args) { DeviceManagementAsyncClient client = new DeviceManagementClientBuilder() .endpoint(Configuration.getGlobalConfiguration().get("AZURE_ACCOUNT_ENDPOINT")) .instanceId(Configuration.getGlobalConfiguration().get("AZURE_INSTANCE_ID")) .credential(new DefaultAzureCredentialBuilder().build()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .buildAsyncClient(); try { Response<BinaryData> response = client.getDeviceWithResponse( Configuration.getGlobalConfiguration().get("DEVICEUPDATE_DEVICE"), null).block(); System.out.println(response.getValue()); } catch (HttpResponseException e) { if (e.getResponse().getStatusCode() == 404) { System.out.println("update does not exist"); } } }
null).block();
public static void main(String[] args) { DeviceManagementClient client = new DeviceManagementClientBuilder() .endpoint(Configuration.getGlobalConfiguration().get("AZURE_ACCOUNT_ENDPOINT")) .instanceId(Configuration.getGlobalConfiguration().get("AZURE_INSTANCE_ID")) .credential(new DefaultAzureCredentialBuilder().build()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .buildClient(); try { Response<BinaryData> response = client.getDeviceWithResponse( Configuration.getGlobalConfiguration().get("DEVICEUPDATE_DEVICE"), null); System.out.println(response.getValue()); } catch (HttpResponseException e) { if (e.getResponse().getStatusCode() == 404) { System.out.println("update does not exist"); } } }
class GetDeviceSample { }
class GetDeviceSample { }
Watch out around `withVirtualTime` as it doesn't always behave the best in a multi-threaded environment. From what I've seen in the past: > StepVerifier.withVirtualTime is a great way to mock execution through time, for example mocking 30 minutes of "running" with a simple API that takes milliseconds to complete. Unfortunately, the base overload uses a shared Scheduler for running which can result in states where the scheduler is shutdown or isn't instantiated when the test runs. So, if you need to use this API you can do one of two things: > > Pass an instance Scheduler instead of using the shared Scheduler. > > Annotated the test class with @Isolated and @Execution(ExecutionMode.SAME_THREAD). (more notes on these later)
void withRetryFluxEmitsItemsLaterThanTimeout() { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofSeconds(5); final AmqpRetryOptions options = new AmqpRetryOptions() .setDelay(Duration.ofSeconds(1)) .setMaxRetries(2) .setTryTimeout(timeout); final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds()); final AtomicInteger resubscribe = new AtomicInteger(); final TestPublisher<AmqpTransportType> singleItem = TestPublisher.create(); final Flux<AmqpTransportType> flux = singleItem.flux() .doOnSubscribe(s -> resubscribe.incrementAndGet()); StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(flux, options, timeoutMessage)) .expectSubscription() .then(() -> singleItem.next(AmqpTransportType.AMQP_WEB_SOCKETS)) .expectNext(AmqpTransportType.AMQP_WEB_SOCKETS) .expectNoEvent(totalWaitTime) .thenCancel() .verify(); assertEquals(1, resubscribe.get()); }
StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(flux, options, timeoutMessage))
void withRetryFluxEmitsItemsLaterThanTimeout() { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofSeconds(5); final AmqpRetryOptions options = new AmqpRetryOptions() .setDelay(Duration.ofSeconds(1)) .setMaxRetries(2) .setTryTimeout(timeout); final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds()); final AtomicInteger resubscribe = new AtomicInteger(); final TestPublisher<AmqpTransportType> singleItem = TestPublisher.create(); final Flux<AmqpTransportType> flux = singleItem.flux() .doOnSubscribe(s -> resubscribe.incrementAndGet()); final VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create(); try { StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(flux, options, timeoutMessage), () -> virtualTimeScheduler, 1) .expectSubscription() .then(() -> singleItem.next(AmqpTransportType.AMQP_WEB_SOCKETS)) .expectNext(AmqpTransportType.AMQP_WEB_SOCKETS) .expectNoEvent(totalWaitTime) .thenCancel() .verify(); } finally { virtualTimeScheduler.dispose(); } assertEquals(1, resubscribe.get()); }
class RetryUtilTest { @Test void getCorrectModeFixed() { final AmqpRetryOptions retryOptions = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions); Assertions.assertNotNull(retryPolicy); assertEquals(FixedAmqpRetryPolicy.class, retryPolicy.getClass()); } @Test void getCorrectModeExponential() { final AmqpRetryOptions retryOptions = new AmqpRetryOptions() .setMode(AmqpRetryMode.EXPONENTIAL); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions); Assertions.assertNotNull(retryPolicy); assertEquals(ExponentialAmqpRetryPolicy.class, retryPolicy.getClass()); } /** * Tests a retry that times out on a Flux. */ @Test void withRetryFlux() { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofMillis(1500); final AmqpRetryOptions options = new AmqpRetryOptions() .setDelay(Duration.ofSeconds(1)) .setMaxRetries(2) .setTryTimeout(timeout); final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds()) .plus(timeout); final AtomicInteger resubscribe = new AtomicInteger(); final Flux<AmqpTransportType> neverFlux = Flux.<AmqpTransportType>never() .doOnSubscribe(s -> resubscribe.incrementAndGet()); StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage)) .expectSubscription() .thenAwait(totalWaitTime) .expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException)) .verify(); assertEquals(options.getMaxRetries() + 1, resubscribe.get()); } /** * Tests a retry that times out on a Flux. */ @Test /** * Tests a retry that times out on a Mono. 
*/ @Test void withRetryMono() { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofMillis(500); final AmqpRetryOptions options = new AmqpRetryOptions() .setDelay(Duration.ofSeconds(1)) .setMaxRetries(2) .setTryTimeout(timeout); final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds()); final AtomicInteger resubscribe = new AtomicInteger(); final Mono<AmqpTransportType> neverFlux = TestPublisher.<AmqpTransportType>create().mono() .doOnSubscribe(s -> resubscribe.incrementAndGet()); StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage)) .expectSubscription() .thenAwait(totalWaitTime) .expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException)) .verify(); assertEquals(options.getMaxRetries() + 1, resubscribe.get()); } static Stream<Throwable> withTransientError() { return Stream.of( new AmqpException(true, "Test-exception", new AmqpErrorContext("test-ns")), new TimeoutException("Test-timeout") ); } @ParameterizedTest @MethodSource void withTransientError(Throwable transientError) { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofSeconds(30); final AmqpRetryOptions options = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setDelay(Duration.ofSeconds(1)) .setMaxRetries(1) .setTryTimeout(timeout); final AtomicBoolean wasSent = new AtomicBoolean(); final Flux<Integer> stream = Flux.concat( Flux.just(0, 1), Flux.create(sink -> { if (wasSent.getAndSet(true)) { sink.next(10); sink.complete(); } else { sink.error(transientError); } }), Flux.just(3, 4)); StepVerifier.create(RetryUtil.withRetry(stream, options, timeoutMessage)) .expectNext(0, 1) .expectNext(0, 1) .expectNext(10) .expectNext(3, 4) .expectComplete() .verify(); } static Stream<Throwable> withNonTransientError() { return Stream.of( new AmqpException(false, "Test-exception", new AmqpErrorContext("test-ns")), new 
IllegalStateException("Some illegal State") ); } @ParameterizedTest @MethodSource void withNonTransientError(Throwable nonTransientError) { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofSeconds(30); final AmqpRetryOptions options = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setDelay(Duration.ofSeconds(1)) .setMaxRetries(1) .setTryTimeout(timeout); final Flux<Integer> stream = Flux.concat( Flux.just(0, 1, 2), Flux.error(nonTransientError), Flux.just(3, 4)); StepVerifier.create(RetryUtil.withRetry(stream, options, timeoutMessage)) .expectNext(0, 1, 2) .expectErrorMatches(error -> error.equals(nonTransientError)) .verify(); } static Stream<AmqpRetryOptions> createRetry() { final AmqpRetryOptions fixed = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setDelay(Duration.ofSeconds(10)) .setMaxRetries(2) .setMaxDelay(Duration.ofSeconds(90)); final AmqpRetryOptions exponential = new AmqpRetryOptions() .setMode(AmqpRetryMode.EXPONENTIAL) .setDelay(Duration.ofSeconds(5)) .setMaxRetries(5) .setMaxDelay(Duration.ofSeconds(35)); return Stream.of(fixed, exponential); } /** * Verifies retry options are correctly mapped to a retry spec. 
*/ @MethodSource @ParameterizedTest void createRetry(AmqpRetryOptions options) { final Retry actual = RetryUtil.createRetry(options); assertTrue(actual instanceof RetryBackoffSpec); final RetryBackoffSpec retrySpec = (RetryBackoffSpec) actual; assertEquals(options.getMaxRetries(), retrySpec.maxAttempts); assertEquals(options.getMaxDelay(), retrySpec.maxBackoff); assertTrue(options.getDelay().compareTo(retrySpec.minBackoff) < 0); assertTrue(retrySpec.jitterFactor > 0); } static Stream<Arguments> retryFilter() { return Stream.of( Arguments.of(new TimeoutException("Something"), true), Arguments.of(new AmqpException(true, "foo message", new AmqpErrorContext("test-namespace")), true), Arguments.of(new AmqpException(false, "foo message", new AmqpErrorContext("test-ns")), false), Arguments.of(new IllegalArgumentException("invalid"), false) ); } @MethodSource @ParameterizedTest void retryFilter(Throwable throwable, boolean expected) { final AmqpRetryOptions options = new AmqpRetryOptions().setMode(AmqpRetryMode.EXPONENTIAL); final Retry retry = RetryUtil.createRetry(options); assertTrue(retry instanceof RetryBackoffSpec); final RetryBackoffSpec retrySpec = (RetryBackoffSpec) retry; final Predicate<Throwable> errorFilter = retrySpec.errorFilter; final boolean actual = errorFilter.test(throwable); assertEquals(expected, actual); } }
class RetryUtilTest { @Test void getCorrectModeFixed() { final AmqpRetryOptions retryOptions = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions); Assertions.assertNotNull(retryPolicy); assertEquals(FixedAmqpRetryPolicy.class, retryPolicy.getClass()); } @Test void getCorrectModeExponential() { final AmqpRetryOptions retryOptions = new AmqpRetryOptions() .setMode(AmqpRetryMode.EXPONENTIAL); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions); Assertions.assertNotNull(retryPolicy); assertEquals(ExponentialAmqpRetryPolicy.class, retryPolicy.getClass()); } /** * Tests a retry that times out on a Flux. */ @Test void withRetryFlux() { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofMillis(1500); final AmqpRetryOptions options = new AmqpRetryOptions() .setDelay(Duration.ofSeconds(1)) .setMaxRetries(2) .setTryTimeout(timeout); final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds()) .plus(timeout); final AtomicInteger resubscribe = new AtomicInteger(); final Flux<AmqpTransportType> neverFlux = Flux.<AmqpTransportType>never() .doOnSubscribe(s -> resubscribe.incrementAndGet()); StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage)) .expectSubscription() .thenAwait(totalWaitTime) .expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException)) .verify(); assertEquals(options.getMaxRetries() + 1, resubscribe.get()); } /** * Tests a retry that times out on a Flux. */ @Test /** * Tests a retry that times out on a Mono. 
*/ @Test void withRetryMono() { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofMillis(500); final AmqpRetryOptions options = new AmqpRetryOptions() .setDelay(Duration.ofSeconds(1)) .setMaxRetries(2) .setTryTimeout(timeout); final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds()); final AtomicInteger resubscribe = new AtomicInteger(); final Mono<AmqpTransportType> neverFlux = TestPublisher.<AmqpTransportType>create().mono() .doOnSubscribe(s -> resubscribe.incrementAndGet()); StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage)) .expectSubscription() .thenAwait(totalWaitTime) .expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException)) .verify(); assertEquals(options.getMaxRetries() + 1, resubscribe.get()); } static Stream<Throwable> withTransientError() { return Stream.of( new AmqpException(true, "Test-exception", new AmqpErrorContext("test-ns")), new TimeoutException("Test-timeout") ); } @ParameterizedTest @MethodSource void withTransientError(Throwable transientError) { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofSeconds(30); final AmqpRetryOptions options = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setDelay(Duration.ofSeconds(1)) .setMaxRetries(1) .setTryTimeout(timeout); final AtomicBoolean wasSent = new AtomicBoolean(); final Flux<Integer> stream = Flux.concat( Flux.just(0, 1), Flux.create(sink -> { if (wasSent.getAndSet(true)) { sink.next(10); sink.complete(); } else { sink.error(transientError); } }), Flux.just(3, 4)); StepVerifier.create(RetryUtil.withRetry(stream, options, timeoutMessage)) .expectNext(0, 1) .expectNext(0, 1) .expectNext(10) .expectNext(3, 4) .expectComplete() .verify(); } static Stream<Throwable> withNonTransientError() { return Stream.of( new AmqpException(false, "Test-exception", new AmqpErrorContext("test-ns")), new 
IllegalStateException("Some illegal State") ); } @ParameterizedTest @MethodSource void withNonTransientError(Throwable nonTransientError) { final String timeoutMessage = "Operation timed out."; final Duration timeout = Duration.ofSeconds(30); final AmqpRetryOptions options = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setDelay(Duration.ofSeconds(1)) .setMaxRetries(1) .setTryTimeout(timeout); final Flux<Integer> stream = Flux.concat( Flux.defer(() -> Flux.just(0, 1, 2)), Flux.defer(() -> Flux.error(nonTransientError)), Flux.defer(() -> Flux.just(3, 4))); final VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create(true); try { StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(stream, options, timeoutMessage), () -> virtualTimeScheduler, 4) .expectNext(0, 1, 2) .expectErrorMatches(error -> error.equals(nonTransientError)) .verify(); } finally { virtualTimeScheduler.dispose(); } } static Stream<AmqpRetryOptions> createRetry() { final AmqpRetryOptions fixed = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setDelay(Duration.ofSeconds(10)) .setMaxRetries(2) .setMaxDelay(Duration.ofSeconds(90)); final AmqpRetryOptions exponential = new AmqpRetryOptions() .setMode(AmqpRetryMode.EXPONENTIAL) .setDelay(Duration.ofSeconds(5)) .setMaxRetries(5) .setMaxDelay(Duration.ofSeconds(35)); return Stream.of(fixed, exponential); } /** * Verifies retry options are correctly mapped to a retry spec. 
*/ @MethodSource @ParameterizedTest void createRetry(AmqpRetryOptions options) { final Retry actual = RetryUtil.createRetry(options); assertTrue(actual instanceof RetryBackoffSpec); final RetryBackoffSpec retrySpec = (RetryBackoffSpec) actual; assertEquals(options.getMaxRetries(), retrySpec.maxAttempts); assertEquals(options.getMaxDelay(), retrySpec.maxBackoff); assertTrue(options.getDelay().compareTo(retrySpec.minBackoff) < 0); assertTrue(retrySpec.jitterFactor > 0); } static Stream<Arguments> retryFilter() { return Stream.of( Arguments.of(new TimeoutException("Something"), true), Arguments.of(new AmqpException(true, "foo message", new AmqpErrorContext("test-namespace")), true), Arguments.of(new AmqpException(false, "foo message", new AmqpErrorContext("test-ns")), false), Arguments.of(new IllegalArgumentException("invalid"), false) ); } @MethodSource @ParameterizedTest void retryFilter(Throwable throwable, boolean expected) { final AmqpRetryOptions options = new AmqpRetryOptions().setMode(AmqpRetryMode.EXPONENTIAL); final Retry retry = RetryUtil.createRetry(options); assertTrue(retry instanceof RetryBackoffSpec); final RetryBackoffSpec retrySpec = (RetryBackoffSpec) retry; final Predicate<Throwable> errorFilter = retrySpec.errorFilter; final boolean actual = errorFilter.test(throwable); assertEquals(expected, actual); } }
container.asyncContainer -> since asyncContainer is not public, it can only be accessed from within the com.azure.cosmos package; customer code generally lives in a different package, so this pattern might cause issues for them. For the sync API, I wonder whether we should introduce a public timeout API in the SDK that uses this same strategy underneath.
// Exercises wrapWithSoftTimeoutAndFallback: a generous soft timeout returns the
// real item, while a near-zero soft timeout makes the wrapper emit the fallback.
public void readItemWithSoftTimeoutAndFallback() throws Exception {
    String pk = UUID.randomUUID().toString();
    String id = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(id, pk);
    // Sentinel document returned when the soft timeout elapses first.
    ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
    container.createItem(properties);

    // Soft timeout of 3 days cannot elapse -> the real read result is expected.
    String successfulResponse = wrapWithSoftTimeoutAndFallback(
        container
            .asyncContainer
            .readItem(id, new PartitionKey(pk), new CosmosItemRequestOptions(), ObjectNode.class),
        Duration.ofDays(3),
        fallBackProperties)
        .map(node -> node.get("id").asText())
        .block();
    assertThat(successfulResponse).isEqualTo(id);

    // Soft timeout of 10 ns elapses before the service responds -> fallback expected.
    String timedOutResponse = wrapWithSoftTimeoutAndFallback(
        container
            .asyncContainer
            .readItem(id, new PartitionKey(pk), new CosmosItemRequestOptions(), ObjectNode.class),
        Duration.ofNanos(10),
        fallBackProperties)
        .map(node -> node.get("id").asText())
        .block();
    assertThat(timedOutResponse).isEqualTo("justFallback");

    // NOTE(review): presumably this sleep lets the still-running background read
    // finish (and log its diagnostics) before teardown -- TODO confirm and document.
    Thread.sleep(1000);
}
.asyncContainer
// Exercises wrapWithSoftTimeoutAndFallback: a generous soft timeout returns the
// real item, while a near-zero soft timeout makes the wrapper emit the fallback.
public void readItemWithSoftTimeoutAndFallback() throws Exception {
    String pk = UUID.randomUUID().toString();
    String id = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(id, pk);
    // Sentinel document returned when the soft timeout elapses first.
    ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
    container.createItem(properties);

    // Soft timeout of 3 days cannot elapse -> the real read result is expected.
    String successfulResponse = wrapWithSoftTimeoutAndFallback(
        container
            .asyncContainer
            .readItem(id, new PartitionKey(pk), new CosmosItemRequestOptions(), ObjectNode.class),
        Duration.ofDays(3),
        fallBackProperties)
        .map(node -> node.get("id").asText())
        .block();
    assertThat(successfulResponse).isEqualTo(id);

    // Soft timeout of 10 ns elapses before the service responds -> fallback expected.
    String timedOutResponse = wrapWithSoftTimeoutAndFallback(
        container
            .asyncContainer
            .readItem(id, new PartitionKey(pk), new CosmosItemRequestOptions(), ObjectNode.class),
        Duration.ofNanos(10),
        fallBackProperties)
        .map(node -> node.get("id").asText())
        .block();
    assertThat(timedOutResponse).isEqualTo("justFallback");

    // NOTE(review): presumably this sleep lets the still-running background read
    // finish (and log its diagnostics) before teardown -- TODO confirm and document.
    Thread.sleep(1000);
}
class CosmosItemTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private CosmosClient client; private CosmosContainer container; @Factory(dataProvider = "clientBuildersWithDirectSession") public CosmosItemTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void createItem() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); validateItemResponse(properties, itemResponse); properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions()); validateItemResponse(properties, itemResponse1); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void createItem_alreadyExists() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); validateItemResponse(properties, itemResponse); properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new 
CosmosItemRequestOptions()); validateItemResponse(properties, itemResponse1); try { container.createItem(properties, new CosmosItemRequestOptions()); } catch (Exception e) { assertThat(e).isInstanceOf(CosmosException.class); assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); } } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void createLargeItem() throws Exception { InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString()); int size = (int) (ONE_MB * 1.5); BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size)); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions()); validateItemResponse(docDefinition, itemResponse); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void createItemWithVeryLargePartitionKey() throws Exception { InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString()); StringBuilder sb = new StringBuilder(); for(int i = 0; i < 100; i++) { sb.append(i).append("x"); } BridgeInternal.setProperty(docDefinition, "mypk", sb.toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions()); validateItemResponse(docDefinition, itemResponse); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readItemWithVeryLargePartitionKey() throws Exception { InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString()); StringBuilder sb = new StringBuilder(); for(int i = 0; i < 100; i++) { sb.append(i).append("x"); } BridgeInternal.setProperty(docDefinition, "mypk", sb.toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition); waitIfNeededForReplicasToCatchUp(getClientBuilder()); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosItemResponse<InternalObjectNode> readResponse = 
container.readItem(docDefinition.getId(), new PartitionKey(sb.toString()), options, InternalObjectNode.class); validateItemResponse(docDefinition, readResponse); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readItem() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(), new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")), new CosmosItemRequestOptions(), InternalObjectNode.class); validateItemResponse(properties, readResponse1); } @Test(groups = { "simple" }, timeOut = TIMEOUT) static <T> Mono<T> wrapWithSoftTimeoutAndFallback( Mono<CosmosItemResponse<T>> source, Duration softTimeout, T fallback) { AtomicBoolean timeoutElapsed = new AtomicBoolean(false); return Mono .<T>create(sink -> { source .subscribeOn(Schedulers.boundedElastic()) .subscribe( response -> { if (timeoutElapsed.get()) { logger.warn( "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}", response.getDiagnostics().toString()); } else { logger.info("COMPLETED SUCCESSFULLY"); } sink.success(response.getItem()); }, error -> { final Throwable unwrappedException = Exceptions.unwrap(error); if (unwrappedException instanceof CosmosException) { final CosmosException cosmosException = (CosmosException) unwrappedException; logger.error( "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}", cosmosException.getDiagnostics() != null ? 
cosmosException.getDiagnostics().toString() : "n/a", cosmosException); } else { logger.error("COMPLETED WITH GENERIC FAILURE", error); } if (timeoutElapsed.get()) { sink.success(); } else { sink.error(error); } } ); }) .timeout(softTimeout) .onErrorResume(error -> { timeoutElapsed.set(true); return Mono.just(fallback); }); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readItemWithEventualConsistency() throws Exception { CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); String idAndPkValue = UUID.randomUUID().toString(); ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue); CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties); CosmosItemResponse<ObjectNode> readResponse1 = container.readItem( idAndPkValue, new PartitionKey(idAndPkValue), new CosmosItemRequestOptions() .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000)) .setConsistencyLevel(ConsistencyLevel.EVENTUAL), ObjectNode.class); logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString()); validateIdOfItemResponse(idAndPkValue, readResponse1); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void replaceItem() throws Exception{ InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); validateItemResponse(properties, itemResponse); String newPropValue = UUID.randomUUID().toString(); BridgeInternal.setProperty(properties, "newProp", newPropValue); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk"))); CosmosItemResponse<InternalObjectNode> replace = 
container.replaceItem(properties, properties.getId(), new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")), options); assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void deleteItem() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(), new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")), options); assertThat(deleteResponse.getStatusCode()).isEqualTo(204); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void deleteItemUsingEntity() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosItemResponse<?> deleteResponse = container.deleteItem(itemResponse.getItem(), options); assertThat(deleteResponse.getStatusCode()).isEqualTo(204); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readAllItems() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 = container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItems() 
throws Exception{ InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); String query = String.format("SELECT * from c where c.id = '%s'", properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 = container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItemsWithCustomCorrelationActivityId() throws Exception{ InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); String query = String.format("SELECT * from c where c.id = '%s'", properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); UUID correlationId = UUID.randomUUID(); ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .setCorrelationActivityId(cosmosQueryRequestOptions, correlationId); CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); feedResponseIterator1 .iterableByPage() .forEach(response -> { assertThat(response.getCorrelationActivityId() == correlationId) .withFailMessage("response.getCorrelationActivityId"); 
assertThat(response.getCosmosDiagnostics().toString().contains(correlationId.toString())) .withFailMessage("response.getCosmosDiagnostics"); }); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItemsWithEventualConsistency() throws Exception{ CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); String idAndPkValue = UUID.randomUUID().toString(); ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue); CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties); String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions() .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000)) .setConsistencyLevel(ConsistencyLevel.EVENTUAL); CosmosPagedIterable<ObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class); feedResponseIterator1.handle( (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString())); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); assertThat(feedResponseIterator1.stream().count() == 1); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<ObjectNode> feedResponseIterator3 = container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class); feedResponseIterator3.handle( (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString())); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); assertThat(feedResponseIterator3.stream().count() == 1); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItemsWithContinuationTokenAndPageSize() throws Exception{ List<String> actualIds = new ArrayList<>(); InternalObjectNode properties = 
getDocumentDefinition(UUID.randomUUID().toString()); container.createItem(properties); actualIds.add(properties.getId()); properties = getDocumentDefinition(UUID.randomUUID().toString()); container.createItem(properties); actualIds.add(properties.getId()); properties = getDocumentDefinition(UUID.randomUUID().toString()); container.createItem(properties); actualIds.add(properties.getId()); String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2)); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class); do { Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable = feedResponseIterator1.iterableByPage(continuationToken, pageSize); for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while(continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readAllItemsOfLogicalPartition() throws Exception{ String pkValue = UUID.randomUUID().toString(); ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<ObjectNode> feedResponseIterator1 = container.readAllItems( new PartitionKey(pkValue), cosmosQueryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); 
CosmosPagedIterable<ObjectNode> feedResponseIterator3 = container.readAllItems( new PartitionKey(pkValue), cosmosQueryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{ String pkValue = UUID.randomUUID().toString(); List<String> actualIds = new ArrayList<>(); ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); container.createItem(properties); properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); container.createItem(properties); properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); container.createItem(properties); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.readAllItems( new PartitionKey(pkValue), cosmosQueryRequestOptions, InternalObjectNode.class); do { Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable = feedResponseIterator1.iterableByPage(continuationToken, pageSize); for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while(continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } private InternalObjectNode getDocumentDefinition(String documentId) { final String uuid = UUID.randomUUID().toString(); final InternalObjectNode properties = new InternalObjectNode(String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , documentId, uuid)); return properties; } 
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException { String json = String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , documentId, pkId); return OBJECT_MAPPER.readValue(json, ObjectNode.class); } private void validateItemResponse(InternalObjectNode containerProperties, CosmosItemResponse<InternalObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(containerProperties.getId()); } private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(expectedId); } }
class CosmosItemTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private CosmosClient client; private CosmosContainer container; @Factory(dataProvider = "clientBuildersWithDirectSession") public CosmosItemTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void createItem() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); validateItemResponse(properties, itemResponse); properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions()); validateItemResponse(properties, itemResponse1); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void createItem_alreadyExists() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); validateItemResponse(properties, itemResponse); properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new 
CosmosItemRequestOptions()); validateItemResponse(properties, itemResponse1); try { container.createItem(properties, new CosmosItemRequestOptions()); } catch (Exception e) { assertThat(e).isInstanceOf(CosmosException.class); assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); } } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void createLargeItem() throws Exception { InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString()); int size = (int) (ONE_MB * 1.5); BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size)); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions()); validateItemResponse(docDefinition, itemResponse); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void createItemWithVeryLargePartitionKey() throws Exception { InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString()); StringBuilder sb = new StringBuilder(); for(int i = 0; i < 100; i++) { sb.append(i).append("x"); } BridgeInternal.setProperty(docDefinition, "mypk", sb.toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions()); validateItemResponse(docDefinition, itemResponse); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readItemWithVeryLargePartitionKey() throws Exception { InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString()); StringBuilder sb = new StringBuilder(); for(int i = 0; i < 100; i++) { sb.append(i).append("x"); } BridgeInternal.setProperty(docDefinition, "mypk", sb.toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition); waitIfNeededForReplicasToCatchUp(getClientBuilder()); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosItemResponse<InternalObjectNode> readResponse = 
container.readItem(docDefinition.getId(), new PartitionKey(sb.toString()), options, InternalObjectNode.class); validateItemResponse(docDefinition, readResponse); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readItem() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(), new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")), new CosmosItemRequestOptions(), InternalObjectNode.class); validateItemResponse(properties, readResponse1); } @Test(groups = { "simple" }, timeOut = TIMEOUT) static <T> Mono<T> wrapWithSoftTimeoutAndFallback( Mono<CosmosItemResponse<T>> source, Duration softTimeout, T fallback) { AtomicBoolean timeoutElapsed = new AtomicBoolean(false); return Mono .<T>create(sink -> { source .subscribeOn(Schedulers.boundedElastic()) .subscribe( response -> { if (timeoutElapsed.get()) { logger.warn( "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}", response.getDiagnostics().toString()); } else { logger.info("COMPLETED SUCCESSFULLY"); } sink.success(response.getItem()); }, error -> { final Throwable unwrappedException = Exceptions.unwrap(error); if (unwrappedException instanceof CosmosException) { final CosmosException cosmosException = (CosmosException) unwrappedException; logger.error( "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}", cosmosException.getDiagnostics() != null ? 
cosmosException.getDiagnostics().toString() : "n/a", cosmosException); } else { logger.error("COMPLETED WITH GENERIC FAILURE", error); } if (timeoutElapsed.get()) { sink.success(); } else { sink.error(error); } } ); }) .timeout(softTimeout) .onErrorResume(error -> { timeoutElapsed.set(true); return Mono.just(fallback); }); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readItemWithEventualConsistency() throws Exception { CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); String idAndPkValue = UUID.randomUUID().toString(); ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue); CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties); CosmosItemResponse<ObjectNode> readResponse1 = container.readItem( idAndPkValue, new PartitionKey(idAndPkValue), new CosmosItemRequestOptions() .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000)) .setConsistencyLevel(ConsistencyLevel.EVENTUAL), ObjectNode.class); logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString()); validateIdOfItemResponse(idAndPkValue, readResponse1); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void replaceItem() throws Exception{ InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); validateItemResponse(properties, itemResponse); String newPropValue = UUID.randomUUID().toString(); BridgeInternal.setProperty(properties, "newProp", newPropValue); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk"))); CosmosItemResponse<InternalObjectNode> replace = 
container.replaceItem(properties, properties.getId(), new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")), options); assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void deleteItem() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(), new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")), options); assertThat(deleteResponse.getStatusCode()).isEqualTo(204); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void deleteItemUsingEntity() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosItemResponse<?> deleteResponse = container.deleteItem(itemResponse.getItem(), options); assertThat(deleteResponse.getStatusCode()).isEqualTo(204); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readAllItems() throws Exception { InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 = container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItems() 
throws Exception{ InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); String query = String.format("SELECT * from c where c.id = '%s'", properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 = container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItemsWithCustomCorrelationActivityId() throws Exception{ InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString()); CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties); String query = String.format("SELECT * from c where c.id = '%s'", properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); UUID correlationId = UUID.randomUUID(); ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .setCorrelationActivityId(cosmosQueryRequestOptions, correlationId); CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); feedResponseIterator1 .iterableByPage() .forEach(response -> { assertThat(response.getCorrelationActivityId() == correlationId) .withFailMessage("response.getCorrelationActivityId"); 
assertThat(response.getCosmosDiagnostics().toString().contains(correlationId.toString())) .withFailMessage("response.getCosmosDiagnostics"); }); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItemsWithEventualConsistency() throws Exception{ CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); String idAndPkValue = UUID.randomUUID().toString(); ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue); CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties); String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions() .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000)) .setConsistencyLevel(ConsistencyLevel.EVENTUAL); CosmosPagedIterable<ObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class); feedResponseIterator1.handle( (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString())); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); assertThat(feedResponseIterator1.stream().count() == 1); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<ObjectNode> feedResponseIterator3 = container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class); feedResponseIterator3.handle( (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString())); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); assertThat(feedResponseIterator3.stream().count() == 1); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryItemsWithContinuationTokenAndPageSize() throws Exception{ List<String> actualIds = new ArrayList<>(); InternalObjectNode properties = 
getDocumentDefinition(UUID.randomUUID().toString()); container.createItem(properties); actualIds.add(properties.getId()); properties = getDocumentDefinition(UUID.randomUUID().toString()); container.createItem(properties); actualIds.add(properties.getId()); properties = getDocumentDefinition(UUID.randomUUID().toString()); container.createItem(properties); actualIds.add(properties.getId()); String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2)); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class); do { Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable = feedResponseIterator1.iterableByPage(continuationToken, pageSize); for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while(continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readAllItemsOfLogicalPartition() throws Exception{ String pkValue = UUID.randomUUID().toString(); ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<ObjectNode> feedResponseIterator1 = container.readAllItems( new PartitionKey(pkValue), cosmosQueryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); 
CosmosPagedIterable<ObjectNode> feedResponseIterator3 = container.readAllItems( new PartitionKey(pkValue), cosmosQueryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{ String pkValue = UUID.randomUUID().toString(); List<String> actualIds = new ArrayList<>(); ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); container.createItem(properties); properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); container.createItem(properties); properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue); container.createItem(properties); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 = container.readAllItems( new PartitionKey(pkValue), cosmosQueryRequestOptions, InternalObjectNode.class); do { Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable = feedResponseIterator1.iterableByPage(continuationToken, pageSize); for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while(continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } private InternalObjectNode getDocumentDefinition(String documentId) { final String uuid = UUID.randomUUID().toString(); final InternalObjectNode properties = new InternalObjectNode(String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , documentId, uuid)); return properties; } 
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException { String json = String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , documentId, pkId); return OBJECT_MAPPER.readValue(json, ObjectNode.class); } private void validateItemResponse(InternalObjectNode containerProperties, CosmosItemResponse<InternalObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(containerProperties.getId()); } private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(expectedId); } }
I think we probably need to cache the result here, turning into a hot publisher, else for each subscribe, it will trigger a call to gateway? https://github.com/Azure/azure-sdk-for-java/blob/d05f7e4ec323f32df36545871ef0685e4a645537/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/caches/AsyncLazy.java#L38 https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#cache--
public Mono<TValue> getValueAsync() { Mono<TValue> valueMono = this.value; if (valueMono != null) { return valueMono; } valueLock.lock(); try { if (this.value != null) { return this.value; } this.value = this.createValueFunc.apply(null); return this.value; } finally { valueLock.unlock(); } }
this.value = this.createValueFunc.apply(null);
public Mono<TValue> getValueAsync() { return this.value.get(); }
class AsyncLazyWithRefresh<TValue> { private final Function<TValue, Mono<TValue>> createValueFunc; private final ReentrantLock valueLock = new ReentrantLock(); private final ReentrantLock removeFromCacheLock = new ReentrantLock(); private boolean removeFromCache = false; private Mono<TValue> value; private Mono<TValue> refreshInProgress; public AsyncLazyWithRefresh(TValue value) { this.createValueFunc = null; this.value = Mono.just(value); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Mono<TValue> value) { this.createValueFunc = null; this.value = value; this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.createValueFunc = taskFactory; this.value = null; this.refreshInProgress = null; } public Mono<TValue> value() { return value; } public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value; AtomicReference<TValue> originalValue = new AtomicReference<>(); valueMono.flatMap(value -> { originalValue.set(value); return valueMono; }); AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>(); valueLock.lock(); try { this.refreshInProgress = createRefreshFunction.apply(originalValue.get()); refreshMono.set(this.refreshInProgress); return refreshMono.get(); } finally { valueLock.unlock(); } } public boolean shouldRemoveFromCache() { if (this.removeFromCache) { return false; } removeFromCacheLock.lock(); try { if (this.removeFromCache) { return false; } this.removeFromCache = true; return true; } finally { removeFromCacheLock.unlock(); } } }
class AsyncLazyWithRefresh<TValue> { private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private final AtomicReference<Mono<TValue>> value; private Mono<TValue> refreshInProgress; private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false); public AsyncLazyWithRefresh(TValue value) { this.value = new AtomicReference<>(); this.value.set(Mono.just(value)); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.value = new AtomicReference<>(); this.value.set(taskFactory.apply(null).cache()); this.refreshInProgress = null; } public Mono<TValue> value() { return value.get(); } @SuppressWarnings("unchecked") public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); return valueMono.flatMap(value -> { if(this.refreshInProgressCompleted.compareAndSet(false, true)) { this.refreshInProgress = createRefreshFunction.apply(value).cache(); return this.refreshInProgress .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgressCompleted.set(false); return this.value.get(); }).doOnError(e -> this.refreshInProgressCompleted.set(false)); } return this.refreshInProgress == null ? valueMono : refreshInProgress; }); } public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); } }
instead of using lock here, probably can use AtomicReference -> compareAndSet,
public Mono<TValue> getValueAsync() { Mono<TValue> valueMono = this.value; if (valueMono != null) { return valueMono; } valueLock.lock(); try { if (this.value != null) { return this.value; } this.value = this.createValueFunc.apply(null); return this.value; } finally { valueLock.unlock(); } }
valueLock.lock();
public Mono<TValue> getValueAsync() { return this.value.get(); }
class AsyncLazyWithRefresh<TValue> { private final Function<TValue, Mono<TValue>> createValueFunc; private final ReentrantLock valueLock = new ReentrantLock(); private final ReentrantLock removeFromCacheLock = new ReentrantLock(); private boolean removeFromCache = false; private Mono<TValue> value; private Mono<TValue> refreshInProgress; public AsyncLazyWithRefresh(TValue value) { this.createValueFunc = null; this.value = Mono.just(value); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Mono<TValue> value) { this.createValueFunc = null; this.value = value; this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.createValueFunc = taskFactory; this.value = null; this.refreshInProgress = null; } public Mono<TValue> value() { return value; } public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value; AtomicReference<TValue> originalValue = new AtomicReference<>(); valueMono.flatMap(value -> { originalValue.set(value); return valueMono; }); AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>(); valueLock.lock(); try { this.refreshInProgress = createRefreshFunction.apply(originalValue.get()); refreshMono.set(this.refreshInProgress); return refreshMono.get(); } finally { valueLock.unlock(); } } public boolean shouldRemoveFromCache() { if (this.removeFromCache) { return false; } removeFromCacheLock.lock(); try { if (this.removeFromCache) { return false; } this.removeFromCache = true; return true; } finally { removeFromCacheLock.unlock(); } } }
class AsyncLazyWithRefresh<TValue> { private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private final AtomicReference<Mono<TValue>> value; private Mono<TValue> refreshInProgress; private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false); public AsyncLazyWithRefresh(TValue value) { this.value = new AtomicReference<>(); this.value.set(Mono.just(value)); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.value = new AtomicReference<>(); this.value.set(taskFactory.apply(null).cache()); this.refreshInProgress = null; } public Mono<TValue> value() { return value.get(); } @SuppressWarnings("unchecked") public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); return valueMono.flatMap(value -> { if(this.refreshInProgressCompleted.compareAndSet(false, true)) { this.refreshInProgress = createRefreshFunction.apply(value).cache(); return this.refreshInProgress .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgressCompleted.set(false); return this.value.get(); }).doOnError(e -> this.refreshInProgressCompleted.set(false)); } return this.refreshInProgress == null ? valueMono : refreshInProgress; }); } public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); } }
Same here, maybe consider using AtomicBoolean?
public boolean shouldRemoveFromCache() { if (this.removeFromCache) { return false; } removeFromCacheLock.lock(); try { if (this.removeFromCache) { return false; } this.removeFromCache = true; return true; } finally { removeFromCacheLock.unlock(); } }
removeFromCacheLock.lock();
public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); }
class AsyncLazyWithRefresh<TValue> { private final Function<TValue, Mono<TValue>> createValueFunc; private final ReentrantLock valueLock = new ReentrantLock(); private final ReentrantLock removeFromCacheLock = new ReentrantLock(); private boolean removeFromCache = false; private Mono<TValue> value; private Mono<TValue> refreshInProgress; public AsyncLazyWithRefresh(TValue value) { this.createValueFunc = null; this.value = Mono.just(value); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Mono<TValue> value) { this.createValueFunc = null; this.value = value; this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.createValueFunc = taskFactory; this.value = null; this.refreshInProgress = null; } public Mono<TValue> getValueAsync() { Mono<TValue> valueMono = this.value; if (valueMono != null) { return valueMono; } valueLock.lock(); try { if (this.value != null) { return this.value; } this.value = this.createValueFunc.apply(null); return this.value; } finally { valueLock.unlock(); } } public Mono<TValue> value() { return value; } public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value; AtomicReference<TValue> originalValue = new AtomicReference<>(); valueMono.flatMap(value -> { originalValue.set(value); return valueMono; }); AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>(); valueLock.lock(); try { this.refreshInProgress = createRefreshFunction.apply(originalValue.get()); refreshMono.set(this.refreshInProgress); return refreshMono.get(); } finally { valueLock.unlock(); } } }
class AsyncLazyWithRefresh<TValue> { private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private final AtomicReference<Mono<TValue>> value; private Mono<TValue> refreshInProgress; private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false); public AsyncLazyWithRefresh(TValue value) { this.value = new AtomicReference<>(); this.value.set(Mono.just(value)); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.value = new AtomicReference<>(); this.value.set(taskFactory.apply(null).cache()); this.refreshInProgress = null; } public Mono<TValue> getValueAsync() { return this.value.get(); } public Mono<TValue> value() { return value.get(); } @SuppressWarnings("unchecked") public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); return valueMono.flatMap(value -> { if(this.refreshInProgressCompleted.compareAndSet(false, true)) { this.refreshInProgress = createRefreshFunction.apply(value).cache(); return this.refreshInProgress .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgressCompleted.set(false); return this.value.get(); }).doOnError(e -> this.refreshInProgressCompleted.set(false)); } return this.refreshInProgress == null ? valueMono : refreshInProgress; }); } }
Add validation about cache key?
public void createItem_withCacheRefresh() throws InterruptedException { String containerId = "bulksplittestcontainer_" + UUID.randomUUID(); int totalRequest = getTotalRequest(); CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk"); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerId); Flux<CosmosItemOperation> cosmosItemOperationFlux1 = Flux.range(0, totalRequest).map(i -> { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey); return CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey)); }); Flux<CosmosItemOperation> cosmosItemOperationFlux2 = Flux.range(0, totalRequest).map(i -> { String partitionKey = UUID.randomUUID().toString(); EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); return CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey)); }); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosBulkOperationResponse<AsyncCacheNonBlockingIntegrationTest>> responseFlux = container.executeBulkOperations(cosmosItemOperationFlux1, cosmosBulkExecutionOptions); AtomicInteger processedDoc = new AtomicInteger(0); responseFlux .flatMap(cosmosBulkOperationResponse -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); 
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); return Mono.just(cosmosBulkItemResponse); }).blockLast(); assertThat(processedDoc.get()).isEqualTo(totalRequest); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) this.bulkClient.getDocClientWrapper(); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); String cacheKeyBeforePartition = routingMap.keys().nextElement(); List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, this.bulkClient); logger.info("Scaling up throughput for split"); ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000); ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block(); logger.info("Throughput replace request submitted for {} ", throughputResponse.getProperties().getManualThroughput()); throughputResponse = container.readThroughput().block(); while (true) { assert throughputResponse != null; if (!throughputResponse.isReplacePending()) { break; } logger.info("Waiting for split to complete"); Thread.sleep(10 * 1000); throughputResponse = container.readThroughput().block(); } List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, this.bulkClient); assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size()) .as("Partition ranges should increase after split"); logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size()); routingMap = getRoutingMap(rxDocumentClient); String cacheKeyAfterPartition = routingMap.keys().nextElement(); assertThat(cacheKeyBeforePartition).isEqualTo(cacheKeyAfterPartition); responseFlux = container.executeBulkOperations(cosmosItemOperationFlux2, cosmosBulkExecutionOptions); AtomicInteger processedDoc2 = new AtomicInteger(0); responseFlux .flatMap(cosmosBulkOperationResponse -> { processedDoc2.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = 
cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); return Mono.just(cosmosBulkItemResponse); }).blockLast(); assertThat(processedDoc.get()).isEqualTo(totalRequest); container.delete().block(); }
assertThat(processedDoc.get()).isEqualTo(totalRequest);
public void createItem_withCacheRefresh() throws InterruptedException { String containerId = "bulksplittestcontainer_" + UUID.randomUUID(); int totalRequest = getTotalRequest(); CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk"); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerId); Flux<CosmosItemOperation> cosmosItemOperationFlux1 = Flux.range(0, totalRequest).map(i -> { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey); return CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey)); }); Flux<CosmosItemOperation> cosmosItemOperationFlux2 = Flux.range(0, totalRequest).map(i -> { String partitionKey = UUID.randomUUID().toString(); EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); return CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey)); }); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosBulkOperationResponse<AsyncCacheNonBlockingIntegrationTest>> responseFlux = container.executeBulkOperations(cosmosItemOperationFlux1, cosmosBulkExecutionOptions); AtomicInteger processedDoc = new AtomicInteger(0); responseFlux .flatMap(cosmosBulkOperationResponse -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); 
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); return Mono.just(cosmosBulkItemResponse); }).blockLast(); assertThat(processedDoc.get()).isEqualTo(totalRequest); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) this.bulkClient.getDocClientWrapper(); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); String cacheKeyBeforePartition = routingMap.keys().nextElement(); List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, this.bulkClient); logger.info("Scaling up throughput for split"); ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000); ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block(); logger.info("Throughput replace request submitted for {} ", throughputResponse.getProperties().getManualThroughput()); throughputResponse = container.readThroughput().block(); while (true) { assert throughputResponse != null; if (!throughputResponse.isReplacePending()) { break; } logger.info("Waiting for split to complete"); Thread.sleep(10 * 1000); throughputResponse = container.readThroughput().block(); } List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, this.bulkClient); assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size()) .as("Partition ranges should increase after split"); logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size()); routingMap = getRoutingMap(rxDocumentClient); String cacheKeyAfterPartition = routingMap.keys().nextElement(); assertThat(cacheKeyBeforePartition).isEqualTo(cacheKeyAfterPartition); responseFlux = container.executeBulkOperations(cosmosItemOperationFlux2, cosmosBulkExecutionOptions); AtomicInteger processedDoc2 = new AtomicInteger(0); responseFlux .flatMap(cosmosBulkOperationResponse -> { processedDoc2.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = 
cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); return Mono.just(cosmosBulkItemResponse); }).blockLast(); assertThat(processedDoc.get()).isEqualTo(totalRequest); container.delete().block(); }
class AsyncCacheNonBlockingIntegrationTest extends BatchTestBase { private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlockingIntegrationTest.class); private CosmosAsyncClient bulkClient; private CosmosAsyncDatabase createdDatabase; @Factory(dataProvider = "simpleClientBuilderGatewaySession") public AsyncCacheNonBlockingIntegrationTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosBulkAsyncTest() { assertThat(this.bulkClient).isNull(); this.bulkClient = getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(this.bulkClient); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeCloseAsync(this.bulkClient); } @Test(groups = {"simple"}, timeOut = TIMEOUT * 200) private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) { RxPartitionKeyRangeCache partitionKeyRangeCache = ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient); AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache = ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache); return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache); } private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, CosmosAsyncClient asyncClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(asyncClient); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 200; 
logger.info("Total count of request for this test case: " + countRequest); return countRequest; } }
class AsyncCacheNonBlockingIntegrationTest extends BatchTestBase { private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlockingIntegrationTest.class); private CosmosAsyncClient bulkClient; private CosmosAsyncDatabase createdDatabase; @Factory(dataProvider = "simpleClientBuilderGatewaySession") public AsyncCacheNonBlockingIntegrationTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosBulkAsyncTest() { assertThat(this.bulkClient).isNull(); this.bulkClient = getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(this.bulkClient); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeCloseAsync(this.bulkClient); } @Test(groups = {"simple"}, timeOut = TIMEOUT * 200) private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) { RxPartitionKeyRangeCache partitionKeyRangeCache = ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient); AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache = ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache); return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache); } private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, CosmosAsyncClient asyncClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(asyncClient); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 200; 
logger.info("Total count of request for this test case: " + countRequest); return countRequest; } }
let `numberOfCacheRefreshes.incrementAndGet();` be part of the Mono, you will see the issue mentioned above about `I think we probably need to cache the result here, turning into a hot publisher, else for each subscribe, it will trigger a call to gateway?`
public void getAsync() { AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0); final Function<Integer, Mono<Integer>> refreshFunc = key -> { numberOfCacheRefreshes.incrementAndGet(); return Mono.just(key * 2); }; AsyncCacheNonBlocking<Integer, Integer> cache = new AsyncCacheNonBlocking<>(); List<Mono<Integer>> tasks = new ArrayList<>(); for (int j = 0; j < 10; j++) { int key = j; tasks.add(cache.getAsync(key, value -> refreshFunc.apply(key), false)); } Flux<Integer> o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList())); o.collectList().single().block(); assertThat(numberOfCacheRefreshes.get()).isEqualTo(10); assertThat(cache.getAsync(2, value -> refreshFunc.apply(2), false).block()).isEqualTo(4); Function<Integer, Mono<Integer>> refreshFunc1 = key -> { numberOfCacheRefreshes.incrementAndGet(); return Mono.just(key * 2 + 1); }; List<Mono<Integer>> tasks1 = new ArrayList<>(); for (int j = 0; j < 10; j++) { int key = j; tasks1.add(cache.getAsync(key, value -> refreshFunc1.apply(key), true)); } Flux<Integer> o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList())); o1.collectList().single().block(); assertThat(numberOfCacheRefreshes.get()).isEqualTo(20); assertThat(cache.getAsync(2, value -> refreshFunc1.apply(2), false).block()).isEqualTo(5); Function<Integer, Mono<Integer>> refreshFunc2 = key -> { numberOfCacheRefreshes.incrementAndGet(); return Mono.just(key * 2 + 3); }; List<Mono<Integer>> tasks2 = new ArrayList<>(); for (int j = 0; j < 10; j++) { int key = j; tasks2.add(cache.getAsync(key, value -> refreshFunc2.apply(key), false)); } Flux<Integer> o2 = Flux.merge(tasks2.stream().map(Mono::flux).collect(Collectors.toList())); o2.collectList().single().block(); assertThat(numberOfCacheRefreshes.get()).isEqualTo(20); assertThat(cache.getAsync(2, value -> refreshFunc2.apply(2), false).block()).isEqualTo(5); }
final Function<Integer, Mono<Integer>> refreshFunc = key -> {
public void getAsync() { AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0); final Function<Integer, Mono<Integer>> refreshFunc = key -> { return Mono.just(key * 2) .doOnNext(t -> { numberOfCacheRefreshes.incrementAndGet(); }).cache(); }; AsyncCacheNonBlocking<Integer, Integer> cache = new AsyncCacheNonBlocking<>(); List<Mono<Integer>> tasks = new ArrayList<>(); for (int j = 0; j < 10; j++) { int key = j; tasks.add(cache.getAsync(key, value -> refreshFunc.apply(key), forceRefresh -> false)); } Flux<Integer> o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList())); o.collectList().single().block(); assertThat(numberOfCacheRefreshes.get()).isEqualTo(10); assertThat(cache.getAsync(2, value -> refreshFunc.apply(2), forceRefresh -> false).block()).isEqualTo(4); Function<Integer, Mono<Integer>> refreshFunc1 = key -> { numberOfCacheRefreshes.incrementAndGet(); return Mono.just(key * 2 + 1); }; List<Mono<Integer>> tasks1 = new ArrayList<>(); for (int j = 0; j < 10; j++) { int key = j; tasks1.add(cache.getAsync(key, value -> refreshFunc1.apply(key), forceRefresh -> true)); } Flux<Integer> o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList())); o1.collectList().single().block(); assertThat(numberOfCacheRefreshes.get()).isEqualTo(20); assertThat(cache.getAsync(2, value -> refreshFunc1.apply(2), forceRefresh -> false).block()).isEqualTo(5); Function<Integer, Mono<Integer>> refreshFunc2 = key -> { numberOfCacheRefreshes.incrementAndGet(); return Mono.just(key * 2 + 3); }; List<Mono<Integer>> tasks2 = new ArrayList<>(); for (int j = 0; j < 10; j++) { int key = j; tasks2.add(cache.getAsync(key, value -> refreshFunc2.apply(key), forceRefresh -> false)); } Flux<Integer> o2 = Flux.merge(tasks2.stream().map(Mono::flux).collect(Collectors.toList())); o2.collectList().single().block(); assertThat(numberOfCacheRefreshes.get()).isEqualTo(20); assertThat(cache.getAsync(2, value -> refreshFunc2.apply(2), forceRefresh -> 
false).block()).isEqualTo(5); }
class AsyncCacheNonBlockingTest { private static final int TIMEOUT = 2000; @Test(groups = {"unit"}, timeOut = TIMEOUT) }
class AsyncCacheNonBlockingTest { private static final int TIMEOUT = 2000; @Test(groups = {"unit"}, timeOut = TIMEOUT) }
we probably can start calling the function in the constructor (no need to wait until getValueAsync()), then there is no need to have a reference to the createValueFunc any more? ``` this.value = this.createValueFunc.apply(null).cache(); ```
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.createValueFunc = taskFactory; this.value = new AtomicReference<>(); this.refreshInProgress = new AtomicReference<>(); }
this.value = new AtomicReference<>();
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.value = new AtomicReference<>(); this.value.set(taskFactory.apply(null).cache()); this.refreshInProgress = null; }
class AsyncLazyWithRefresh<TValue> { private final Function<TValue, Mono<TValue>> createValueFunc; private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private AtomicReference<Mono<TValue>> value; private final AtomicReference<Mono<TValue>> refreshInProgress; public AsyncLazyWithRefresh(TValue value) { this.createValueFunc = null; if (this.value != null) { this.value.set(Mono.just(value)); } this.refreshInProgress = new AtomicReference<>(); } public Mono<TValue> getValueAsync() { this.value.compareAndSet(null, this.createValueFunc.apply(null)); return this.value.get().cache(); } public Mono<TValue> value() { return value.get(); } public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); AtomicReference<TValue> originalValue = new AtomicReference<>(); return valueMono.flatMap(value -> { originalValue.set(value); return valueMono; }).flatMap(value -> { if(this.refreshInProgress.compareAndSet(null, createRefreshFunction.apply(originalValue.get()))) { return this.refreshInProgress.get().cache() .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgress.set(null); return this.value.get(); }); } return this.refreshInProgress.get(); }); } public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); } }
class AsyncLazyWithRefresh<TValue> { private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private final AtomicReference<Mono<TValue>> value; private Mono<TValue> refreshInProgress; private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false); public AsyncLazyWithRefresh(TValue value) { this.value = new AtomicReference<>(); this.value.set(Mono.just(value)); this.refreshInProgress = null; } public Mono<TValue> getValueAsync() { return this.value.get(); } public Mono<TValue> value() { return value.get(); } @SuppressWarnings("unchecked") public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); return valueMono.flatMap(value -> { if(this.refreshInProgressCompleted.compareAndSet(false, true)) { this.refreshInProgress = createRefreshFunction.apply(value).cache(); return this.refreshInProgress .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgressCompleted.set(false); return this.value.get(); }).doOnError(e -> this.refreshInProgressCompleted.set(false)); } return this.refreshInProgress == null ? valueMono : refreshInProgress; }); } public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); } }
We don't need this method. We can simplify this using the `Exceptions.java` class that we have under cosmos.implementation package, and use its pre-defined function below. ``` public static boolean isNotFound(CosmosException e) { return isStatusCode(e, HttpConstants.StatusCodes.NOTFOUND); } ``` https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Exceptions.java#L25 This will keep it simple, and also we don't need to use netty's `HttpResponseStatus` class
private Boolean removeNotFoundFromCacheException(CosmosException e) { if (e.getStatusCode() == HttpResponseStatus.NOT_FOUND.code()) { return true; } return false; }
if (e.getStatusCode() == HttpResponseStatus.NOT_FOUND.code()) {
private Boolean removeNotFoundFromCacheException(CosmosException e) { if (Exceptions.isNotFound(e)) { return true; } return false; }
class AsyncCacheNonBlocking<TKey, TValue> { private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlocking.class); private final ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values; private AsyncCacheNonBlocking(ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values) { this.values = values; } public AsyncCacheNonBlocking() { this(new ConcurrentHashMap<>()); } /** * * <p> * If another initialization function is already running, new initialization function will not be started. * The result will be result of currently running initialization function. * </p> * * <p> * If previous initialization function is successfully completed it will return the value. It is possible this * value is stale and will only be updated after the force refresh task is complete. * Force refresh is true: * If the key does not exist: It will create and await the new task * If the key exists and the current task is still running: It will return the existing task * If the key exists and the current task is already done: It will start a new task to get the updated values. * Once the refresh task is complete it will be returned to caller. * If it is a success the value in the cache will be updated. If the refresh task throws an exception the key will be removed from the cache. * </p> * * <p> * If previous initialization function failed - new one will be launched. * </p> * * @param key Key for which to get a value. * @param singleValueInitFunc Initialization function. * @param forceRefresh Force refresh for refreshing the cache * @return Cached value or value returned by initialization function. 
*/ public Mono<TValue> getAsync( TKey key, Function<TValue, Mono<TValue>> singleValueInitFunc, Function<TValue, Boolean> forceRefresh) { AsyncLazyWithRefresh<TValue> initialLazyValue = values.get(key); if (initialLazyValue != null) { if (logger.isDebugEnabled()) { logger.debug("cache[{}] exists", key); } return initialLazyValue.getValueAsync().flatMap(value -> { if(!forceRefresh.apply(value)) { return Mono.just(value); } Mono<TValue> refreshMono = initialLazyValue.createAndWaitForBackgroundRefreshTaskAsync(key, singleValueInitFunc); return refreshMono.onErrorResume( (exception) -> { if (logger.isDebugEnabled()) { logger.debug("refresh cache [{}] resulted in error", key, exception); } if (initialLazyValue.shouldRemoveFromCache()) { if (removeNotFoundFromCacheException((CosmosException)exception)) { this.remove(key); } } return Mono.error(exception); } ); }).onErrorResume((exception) -> { if (logger.isDebugEnabled()) { logger.debug("cache[{}] resulted in error", key, exception); } if (initialLazyValue.shouldRemoveFromCache()) { this.remove(key); } return Mono.error(exception); }); } if (logger.isDebugEnabled()) { logger.debug("cache[{}] doesn't exist, computing new value", key); } AsyncLazyWithRefresh<TValue> asyncLazyWithRefresh = new AsyncLazyWithRefresh<TValue>(singleValueInitFunc); this.values.putIfAbsent(key, asyncLazyWithRefresh); AsyncLazyWithRefresh<TValue> result = this.values.get(key); return result.getValueAsync().onErrorResume( (exception) -> { if (logger.isDebugEnabled()) { logger.debug("cache[{}] resulted in error", key, exception); } if (result.shouldRemoveFromCache()) { this.remove(key); } return Mono.error(exception); } ); } public void set(TKey key, TValue value) { if (logger.isDebugEnabled()) { logger.debug("set cache[{}]={}", key, value); } AsyncLazyWithRefresh<TValue> updatedValue = new AsyncLazyWithRefresh<TValue>(value); this.values.put(key, updatedValue); } public void remove(TKey key) { values.remove(key); } /** * This is AsyncLazy that has 
an additional Task that can * be used to update the value. This allows concurrent requests * to use the stale value while the refresh is occurring. */ private class AsyncLazyWithRefresh<TValue> { private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private AtomicReference<Mono<TValue>> value; private Mono<TValue> refreshInProgress; private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false); public AsyncLazyWithRefresh(TValue value) { this.value = new AtomicReference<>(); this.value.set(Mono.just(value)); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.value = new AtomicReference<>(); this.value.set(taskFactory.apply(null).cache()); this.refreshInProgress = null; } public Mono<TValue> getValueAsync() { return this.value.get(); } public Mono<TValue> value() { return value.get(); } @SuppressWarnings("unchecked") public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); return valueMono.flatMap(value -> { if(this.refreshInProgressCompleted.compareAndSet(false, true)) { this.refreshInProgress = createRefreshFunction.apply(value).cache(); return this.refreshInProgress .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgressCompleted.set(false); return this.value.get(); }); } return this.refreshInProgress; }); } public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); } } }
class AsyncCacheNonBlocking<TKey, TValue> { private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlocking.class); private final ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values; public AsyncCacheNonBlocking() { this.values = new ConcurrentHashMap<>(); } /** * * <p> * If another initialization function is already running, new initialization function will not be started. * The result will be result of currently running initialization function. * </p> * * <p> * If previous initialization function is successfully completed it will return the value. It is possible this * value is stale and will only be updated after the force refresh task is complete. * Force refresh is true: * If the key does not exist: It will create and await the new task * If the key exists and the current task is still running: It will return the existing task * If the key exists and the current task is already done: It will start a new task to get the updated values. * Once the refresh task is complete it will be returned to caller. * If it is a success the value in the cache will be updated. If the refresh task throws an exception the key will be removed from the cache. * </p> * * <p> * If previous initialization function failed - new one will be launched. * </p> * * @param key Key for which to get a value. * @param singleValueInitFunc Initialization function. * @param forceRefresh Force refresh for refreshing the cache * @return Cached value or value returned by initialization function. 
*/ public Mono<TValue> getAsync( TKey key, Function<TValue, Mono<TValue>> singleValueInitFunc, Function<TValue, Boolean> forceRefresh) { AsyncLazyWithRefresh<TValue> initialLazyValue = values.get(key); if (initialLazyValue != null) { logger.debug("cache[{}] exists", key); return initialLazyValue.getValueAsync().flatMap(value -> { if(!forceRefresh.apply(value)) { return Mono.just(value); } Mono<TValue> refreshMono = initialLazyValue.createAndWaitForBackgroundRefreshTaskAsync(key, singleValueInitFunc); return refreshMono.onErrorResume( (exception) -> { logger.debug("refresh cache [{}] resulted in error", key, exception); if (initialLazyValue.shouldRemoveFromCache()) { if (removeNotFoundFromCacheException((CosmosException)exception)) { this.remove(key); } } return Mono.error(exception); } ); }).onErrorResume((exception) -> { if (logger.isDebugEnabled()) { logger.debug("cache[{}] resulted in error", key, exception); } if (initialLazyValue.shouldRemoveFromCache()) { this.remove(key); } return Mono.error(exception); }); } if (logger.isDebugEnabled()) { logger.debug("cache[{}] doesn't exist, computing new value", key); } AsyncLazyWithRefresh<TValue> asyncLazyWithRefresh = new AsyncLazyWithRefresh<TValue>(singleValueInitFunc); AsyncLazyWithRefresh<TValue> preResult = this.values.putIfAbsent(key, asyncLazyWithRefresh); if (preResult == null) { preResult = asyncLazyWithRefresh; } AsyncLazyWithRefresh<TValue> result = preResult; return result.getValueAsync().onErrorResume( (exception) -> { if (logger.isDebugEnabled()) { logger.debug("cache[{}] resulted in error", key, exception); } if (result.shouldRemoveFromCache()) { this.remove(key); } return Mono.error(exception); } ); } public void set(TKey key, TValue value) { if (logger.isDebugEnabled()) { logger.debug("set cache[{}]={}", key, value); } AsyncLazyWithRefresh<TValue> updatedValue = new AsyncLazyWithRefresh<TValue>(value); this.values.put(key, updatedValue); } public void remove(TKey key) { values.remove(key); } /** * 
This is AsyncLazy that has an additional Task that can * be used to update the value. This allows concurrent requests * to use the stale value while the refresh is occurring. */ private class AsyncLazyWithRefresh<TValue> { private final AtomicBoolean removeFromCache = new AtomicBoolean(false); private final AtomicReference<Mono<TValue>> value; private Mono<TValue> refreshInProgress; private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false); public AsyncLazyWithRefresh(TValue value) { this.value = new AtomicReference<>(); this.value.set(Mono.just(value)); this.refreshInProgress = null; } public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) { this.value = new AtomicReference<>(); this.value.set(taskFactory.apply(null).cache()); this.refreshInProgress = null; } public Mono<TValue> getValueAsync() { return this.value.get(); } public Mono<TValue> value() { return value.get(); } @SuppressWarnings("unchecked") public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) { Mono<TValue> valueMono = this.value.get(); return valueMono.flatMap(value -> { if(this.refreshInProgressCompleted.compareAndSet(false, true)) { this.refreshInProgress = createRefreshFunction.apply(value).cache(); return this.refreshInProgress .flatMap(response -> { this.value.set(Mono.just(response)); this.refreshInProgressCompleted.set(false); return this.value.get(); }).doOnError(e -> this.refreshInProgressCompleted.set(false)); } return this.refreshInProgress == null ? valueMono : refreshInProgress; }); } public boolean shouldRemoveFromCache() { return this.removeFromCache.compareAndSet(false, true); } } }
I no longer have the background why we used `doPrivileged`. But directly call `accessibleObject.setAccessible(true)` here would result in StackOverflowException on javac (build), at `com.sun.tools.javac.comp.Resolve.isAccessible`, which seems to be a javac bug.
// Marks the given reflective object as accessible.
//
// NOTE: the indirection through a Runnable is deliberate (this code previously
// used doPrivileged). Invoking accessibleObject.setAccessible(true) directly in
// this method has been observed to crash the build with a StackOverflowError in
// javac, at com.sun.tools.javac.comp.Resolve.isAccessible, which appears to be a
// javac bug — do not "simplify" this to a direct call.
private void setAccessible(final AccessibleObject accessibleObject) {
    Runnable runnable = () -> accessibleObject.setAccessible(true);
    runnable.run();
}
}
// Marks the given reflective object as accessible.
//
// NOTE: the indirection through a Runnable is deliberate (this code previously
// used doPrivileged). Invoking accessibleObject.setAccessible(true) directly in
// this method has been observed to crash the build with a StackOverflowError in
// javac, at com.sun.tools.javac.comp.Resolve.isAccessible, which appears to be a
// javac bug — do not "simplify" this to a direct call.
private void setAccessible(final AccessibleObject accessibleObject) {
    Runnable runnable = () -> accessibleObject.setAccessible(true);
    runnable.run();
}
class ResourceManagerTestBase extends TestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "http: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } }; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestBase.class); private AzureProfile testProfile; private AuthFile testAuthFile; private boolean isSkipInPlayback; /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * @return A randomly generated UUID. 
*/ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } /** * Loads a credential from file. * * @return A credential loaded from a file. */ protected TokenCredential credentialFromFile() { return testAuthFile.getCredential(); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. */ protected String clientIdFromFile() { String clientId = testAuthFile == null ? null : testAuthFile.getClientId(); return testResourceNamer.recordValueFromConfig(clientId); } /** * @return The test profile. 
*/ protected AzureProfile profile() { return testProfile; } /** * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; Map<String, String> textReplacementRules = new HashMap<>(); String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { if (interceptorManager.getRecordedData() == null) { skipInPlayback(); return; } testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules)); httpPipeline = buildHttpPipeline( null, testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); textReplacementRules.put(PLAYBACK_URI_BASE + "1234", PLAYBACK_URI); addTextReplacementRules(textReplacementRules); } else { if (System.getenv(AZURE_AUTH_LOCATION) != null) { final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION)); try { testAuthFile = AuthFile.parse(credFile); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. 
Please check file format.", e)); } credential = testAuthFile.getCredential(); testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment()); } else { Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET); String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID); if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set")); } credential = new ClientSecretCredentialBuilder() .tenantId(tenantId) .clientId(clientId) .clientSecret(clientSecret) .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); } List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new TimeoutPolicy(Duration.ofMinutes(1))); if (!interceptorManager.isLiveMode() && !testContextManager.doNotRecordTest()) { policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules)); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); textReplacementRules.put(testProfile.getSubscriptionId(), ZERO_SUBSCRIPTION); textReplacementRules.put(testProfile.getTenantId(), ZERO_TENANT); 
textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getResourceManagerEndpoint()), PLAYBACK_URI + "/"); textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getMicrosoftGraphEndpoint()), PLAYBACK_URI + "/"); textReplacementRules.put("https: textReplacementRules.put("https: addTextReplacementRules(textReplacementRules); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if 
(host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException e) { } } return clientBuilder.build(); } @Override protected void afterTest() { if (!isSkipInPlayback) { cleanUpResources(); } } private void addTextReplacementRules(Map<String, String> rules) { for (Map.Entry<String, String> entry : rules.entrySet()) { interceptorManager.addTextReplacementRule(entry.getKey(), entry.getValue()); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. */ protected abstract void cleanUpResources(); }
class ResourceManagerTestBase extends TestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "http: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } }; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestBase.class); private AzureProfile testProfile; private AuthFile testAuthFile; private boolean isSkipInPlayback; /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * @return A randomly generated UUID. 
*/ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } /** * Loads a credential from file. * * @return A credential loaded from a file. */ protected TokenCredential credentialFromFile() { return testAuthFile.getCredential(); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. */ protected String clientIdFromFile() { String clientId = testAuthFile == null ? null : testAuthFile.getClientId(); return testResourceNamer.recordValueFromConfig(clientId); } /** * @return The test profile. 
*/ protected AzureProfile profile() { return testProfile; } /** * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; Map<String, String> textReplacementRules = new HashMap<>(); String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { if (interceptorManager.getRecordedData() == null) { skipInPlayback(); return; } testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules)); httpPipeline = buildHttpPipeline( null, testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); textReplacementRules.put(PLAYBACK_URI_BASE + "1234", PLAYBACK_URI); addTextReplacementRules(textReplacementRules); } else { if (System.getenv(AZURE_AUTH_LOCATION) != null) { final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION)); try { testAuthFile = AuthFile.parse(credFile); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. 
Please check file format.", e)); } credential = testAuthFile.getCredential(); testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment()); } else { Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET); String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID); if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set")); } credential = new ClientSecretCredentialBuilder() .tenantId(tenantId) .clientId(clientId) .clientSecret(clientSecret) .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); } List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new TimeoutPolicy(Duration.ofMinutes(1))); if (!interceptorManager.isLiveMode() && !testContextManager.doNotRecordTest()) { policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules)); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); textReplacementRules.put(testProfile.getSubscriptionId(), ZERO_SUBSCRIPTION); textReplacementRules.put(testProfile.getTenantId(), ZERO_TENANT); 
textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getResourceManagerEndpoint()), PLAYBACK_URI + "/"); textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getMicrosoftGraphEndpoint()), PLAYBACK_URI + "/"); textReplacementRules.put("https: textReplacementRules.put("https: addTextReplacementRules(textReplacementRules); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if 
(host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException e) { } } return clientBuilder.build(); } @Override protected void afterTest() { if (!isSkipInPlayback) { cleanUpResources(); } } private void addTextReplacementRules(Map<String, String> rules) { for (Map.Entry<String, String> entry : rules.entrySet()) { interceptorManager.addTextReplacementRule(entry.getKey(), entry.getValue()); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. */ protected abstract void cleanUpResources(); }
Follow the existing convention in this file for building exception messages, i.e. use `String.format` rather than string concatenation.
/**
 * Sets a custom endpoint address to use when connecting to the service.
 *
 * @param customEndpointAddress The custom endpoint address; {@code null} clears any
 *     previously configured custom endpoint.
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws IllegalArgumentException if {@code customEndpointAddress} is not a valid URL.
 */
public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
    // null explicitly clears the custom endpoint.
    if (customEndpointAddress == null) {
        this.customEndpointAddress = null;
        return this;
    }
    try {
        this.customEndpointAddress = new URL(customEndpointAddress);
    } catch (MalformedURLException e) {
        // Build the message with String.format, consistent with the rest of this file.
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
    }
    return this;
}
new IllegalArgumentException(customEndpointAddress + " : is not a valid URL,", e));
/**
 * Sets a custom endpoint address to use when connecting to the service.
 *
 * @param customEndpointAddress The custom endpoint address; {@code null} clears any
 *     previously configured custom endpoint.
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws IllegalArgumentException if {@code customEndpointAddress} is not a valid URL.
 */
public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
    if (customEndpointAddress == null) {
        // null explicitly clears the custom endpoint.
        this.customEndpointAddress = null;
    } else {
        try {
            this.customEndpointAddress = new URL(customEndpointAddress);
        } catch (MalformedURLException e) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
        }
    }
    return this;
}
class ServiceBusClientBuilder implements TokenCredentialTrait<ServiceBusClientBuilder>, AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>, ConnectionStringTrait<ServiceBusClientBuilder>, AzureSasCredentialTrait<ServiceBusClientBuilder>, AmqpTrait<ServiceBusClientBuilder>, ConfigurationTrait<ServiceBusClientBuilder> { private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT); private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties"; private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s"; private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue"; private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue"; private static final int DEFAULT_PREFETCH_COUNT = 0; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class); private final Object connectionLock = new Object(); private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer(); private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); private ClientOptions clientOptions; private Configuration configuration; private ServiceBusConnectionProcessor sharedConnection; private String connectionStringEntityName; private TokenCredential credentials; private String fullyQualifiedNamespace; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport = AmqpTransportType.AMQP; private SslDomain.VerifyMode verifyMode; private boolean 
crossEntityTransactions; private URL customEndpointAddress; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. */ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType */ public ServiceBusClientBuilder() { } /** * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of * certain properties, as well as support the addition of custom header information. Refer to the {@link * ClientOptions} documentation for more information. * * @param clientOptions to be set on the client. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the fully-qualified namespace for the Service Bus. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return this; } private String getAndValidateFullyQualifiedNamespace() { if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return fullyQualifiedNamespace; } /** * Sets a custom endpoint address when connecting to the Event Hubs service. 
This can be useful when your network * does not allow connecting to the standard Azure Event Hubs endpoint address, but does allow connecting through * an intermediary. For example: {@literal https: * <p> * If no port is specified, the default port for the {@link * used. * * @param customEndpointAddress The custom endpoint address. * @return The updated {@link ServiceBusClientBuilder} object. * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}. */ /** * Sets the connection string for a Service Bus namespace or a specific Service Bus resource. * * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder connectionString(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = getTokenCredential(properties); } catch (Exception e) { throw LOGGER.logExceptionAsError( new AzureException("Could not create the ServiceBusSharedKeyCredential.", e)); } this.fullyQualifiedNamespace = properties.getEndpoint().getHost(); String entityPath = properties.getEntityPath(); if (!CoreUtils.isNullOrEmpty(entityPath)) { LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Setting entity from connection string."); this.connectionStringEntityName = entityPath; } return credential(properties.getEndpoint().getHost(), tokenCredential); } /** * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through * one 'send-via' entity on server side as explained next. 
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform * their first operation need to either be senders, or if they are receivers they need to be on the same entity as * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure * that the transaction is committed because it cannot route a receive operation through a different entity). For * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you * first send to entity A, and then attempted to receive from entity B, an exception would be thrown. * * <p><strong>Avoid using non-transaction API on this client</strong></p> * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API. * * <p><strong>When not to enable this feature</strong></p> * If your transaction is involved in one Service bus entity only. For example you are receiving from one * queue/subscription and you want to settle your own messages which are part of one transaction. * * @return The updated {@link ServiceBusSenderClientBuilder} object. 
* * @see <a href="https: */ public ServiceBusClientBuilder enableCrossEntityTransactions() { this.crossEntityTransactions = true; return this; } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY); } else { tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure Service Bus clients. Use {@link * Configuration * * @param configuration The configuration store used to configure Service Bus clients. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the credential by using a {@link TokenCredential} for the Service Bus resource. * <a href="https: * azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate * the access to the Service Bus resource. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * @param credential The token credential to use for authentication. Access controls may be specified by the * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration. * * @return The updated {@link ServiceBusClientBuilder} object. 
*/ public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential The token credential to use for authentication. Access controls may be specified by the * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder credential(TokenCredential credential) { this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Sets the credential with the shared access policies for the Service Bus resource. * You can find the shared access policies on the azure portal or Azure CLI. * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'. * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute * can be either 'Primary Key' or 'Secondary Key'. * This method and {@link * you to update the name and key. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * @param credential {@link AzureNamedKeyCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. 
*/ public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); Objects.requireNonNull(credential, "'credential' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(), credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY); return this; } /** * Sets the credential with the shared access policies for the Service Bus resource. * You can find the shared access policies on the azure portal or Azure CLI. * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'. * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute * can be either 'Primary Key' or 'Secondary Key'. * This method and {@link * you to update the name and key. * * @param credential {@link AzureNamedKeyCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(), credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY); return this; } /** * Sets the credential with Shared Access Signature for the Service Bus resource. * Refer to <a href="https: * Service Bus access control with Shared Access Signatures</a>. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. 
* @param credential {@link AzureSasCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); Objects.requireNonNull(credential, "'credential' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature()); return this; } /** * Sets the credential with Shared Access Signature for the Service Bus resource. * Refer to <a href="https: * Service Bus access control with Shared Access Signatures</a>. * * @param credential {@link AzureSasCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder credential(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature()); return this; } /** * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Package-private method that sets the verify mode for this connection. * * @param verifyMode The verification mode. * @return The updated {@link ServiceBusClientBuilder} object. 
*/ ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) { this.verifyMode = verifyMode; return this; } /** * Sets the retry options for Service Bus clients. If not specified, the default retry options are used. * * @param retryOptions The retry options to use. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the scheduler to use. * * @param scheduler Scheduler to be used. * * @return The updated {@link ServiceBusClientBuilder} object. */ ServiceBusClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link * AmqpTransportType * * @param transportType The transport type to use. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder transportType(AmqpTransportType transportType) { this.transport = transportType; return this; } /** * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders. * * @return A new instance of {@link ServiceBusSenderClientBuilder}. */ public ServiceBusSenderClientBuilder sender() { return new ServiceBusSenderClientBuilder(); } /** * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers. * * @return A new instance of {@link ServiceBusReceiverClientBuilder}. */ public ServiceBusReceiverClientBuilder receiver() { return new ServiceBusReceiverClientBuilder(); } /** * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service * Bus message receivers. * * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}. 
*/ public ServiceBusSessionReceiverClientBuilder sessionReceiver() { return new ServiceBusSessionReceiverClientBuilder(); } /** * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient} * instance. * * @return A new instance of {@link ServiceBusProcessorClientBuilder}. */ public ServiceBusProcessorClientBuilder processor() { return new ServiceBusProcessorClientBuilder(); } /** * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor * instance that processes sessions. * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}. */ public ServiceBusSessionProcessorClientBuilder sessionProcessor() { return new ServiceBusSessionProcessorClientBuilder(); } /** * Called when a child client is closed. Disposes of the shared connection if there are no more clients. */ void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); LOGGER.atInfo() .addKeyValue("numberOfOpenClients", numberOfOpenClients) .log("Closing a dependent client."); if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { LOGGER.atWarning() .addKeyValue("numberOfOpenClients", numberOfOpenClients) .log("There should not be less than 0 clients."); } LOGGER.info("No more open clients, closing shared connection."); if (sharedConnection != null) { sharedConnection.dispose(); sharedConnection = null; } else { LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed."); } } } private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.elastic(); } synchronized (connectionLock) { if (sharedConnection == null) { final ConnectionOptions connectionOptions = getConnectionOptions(); final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> 
{ final String connectionId = StringUtil.getRandomString("MF"); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()); return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer, crossEntityTransactions); }).repeat(); sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); } } final int numberOfOpenClients = openClients.incrementAndGet(); LOGGER.info(" return sharedConnection; } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "or credentials(String, String, TokenCredential)" )); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP.")); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; final SslDomain.VerifyMode verificationMode = verifyMode != null ? 
verifyMode : SslDomain.VerifyMode.VERIFY_PEER_NAME; final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions(); final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); if (customEndpointAddress == null) { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion); } else { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion, customEndpointAddress.getHost(), customEndpointAddress.getPort()); } } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress, configuration, Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"))); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress, Configuration configuration, boolean useSystemProxies) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = 
configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else if (useSystemProxies) { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } else { LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't " + "set or was false."); return ProxyOptions.SYSTEM_DEFAULTS; } } private static boolean isNullOrEmpty(String item) { return item == null || item.isEmpty(); } private static MessagingEntityType validateEntityPaths(String connectionStringEntityName, String topicName, String queueName) { final boolean hasTopicName = !isNullOrEmpty(topicName); final boolean hasQueueName = !isNullOrEmpty(queueName); final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName); final MessagingEntityType entityType; if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException( "Cannot build client without setting either a queueName or topicName.")); } else if (hasQueueName && hasTopicName) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName))); } else if (hasQueueName) { if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "queueName (%s) is different than the connectionString's EntityPath (%s).", queueName, connectionStringEntityName))); } entityType = MessagingEntityType.QUEUE; } else if 
(hasTopicName) { if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "topicName (%s) is different than the connectionString's EntityPath (%s).", topicName, connectionStringEntityName))); } entityType = MessagingEntityType.SUBSCRIPTION; } else { entityType = MessagingEntityType.UNKNOWN; } return entityType; } private static String getEntityPath(MessagingEntityType entityType, String queueName, String topicName, String subscriptionName, SubQueue subQueue) { String entityPath; switch (entityType) { case QUEUE: entityPath = queueName; break; case SUBSCRIPTION: if (isNullOrEmpty(subscriptionName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "topicName (%s) must have a subscriptionName associated with it.", topicName))); } entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName, subscriptionName); break; default: throw ServiceBusClientBuilder.LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } if (subQueue == null) { return entityPath; } switch (subQueue) { case NONE: break; case TRANSFER_DEAD_LETTER_QUEUE: entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX; break; case DEAD_LETTER_QUEUE: entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX; break; default: throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: " + subQueue)); } return entityPath; } /** * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages * to Service Bus. 
* * @see ServiceBusSenderAsyncClient * @see ServiceBusSenderClient */ @ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class}) public final class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. 
*/ public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null); } /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions)); } } /** * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus * entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link * and {@link * next session to process. * * <p> * By default, the processor: * <ul> * <li>Automatically settles messages. Disabled via {@link * <li>Processes 1 session concurrently. 
 * Configured via {@link ServiceBusSessionProcessorClientBuilder#maxConcurrentSessions(int)}.</li>
 * <li>Invokes 1 instance of {@link ServiceBusSessionProcessorClientBuilder#processMessage(Consumer)} and
 * {@link ServiceBusSessionProcessorClientBuilder#processError(Consumer)} callbacks at a time.</li>
 * </ul>
 *
 * <p><strong>Instantiate a session-enabled processor client</strong></p>
 * <pre>
 * ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder()
 *     .connectionString(connectionString)
 *     .sessionProcessor()
 *     .queueName(queueName)
 *     .maxConcurrentSessions(2)
 *     .processMessage(onMessage)
 *     .processError(onError)
 *     .buildProcessorClient();
 *
 * sessionProcessor.start();
 * </pre>
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusSessionProcessorClientBuilder {
    // Options (concurrency, auto-complete, tracing) passed through to the processor client.
    private final ServiceBusProcessorClientOptions processorClientOptions;
    // Underlying receiver builder; most setters on this builder simply delegate to it.
    private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;

    private ServiceBusSessionProcessorClientBuilder() {
        sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
        // By default the processor rolls over one session at a time.
        sessionReceiverClientBuilder.maxConcurrentSessions(1);
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
     * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }

    /**
     * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
        }
        sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
        return this;
    }

    /**
     * Sets the prefetch count of the processor. Prefetch speeds up the message flow by aiming to have a message
     * readily available for local retrieval when and before the application starts the processor. Setting a
     * non-zero value will prefetch that number of messages; setting the value to zero turns prefetch off.
     * A non-zero prefetch risks losing messages even though it has better performance.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
        sessionReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
        sessionReceiverClientBuilder.queueName(queueName);
        return this;
    }

    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        sessionReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
     * secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
        this.sessionReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@link #topicName(String)} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
        sessionReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
        sessionReceiverClientBuilder.topicName(topicName);
        return this;
    }

    /**
     * The message processing callback for the processor that will be executed when a message is received.
     *
     * @param processMessage The message processing consumer that will be executed when a message is received.
     *
     * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder processMessage(
        Consumer<ServiceBusReceivedMessageContext> processMessage) {
        this.processMessage = processMessage;
        return this;
    }

    /**
     * The error handler for the processor which will be invoked in the event of an error while receiving messages.
     *
     * @param processError The error handler which will be executed when an error occurs.
     *
     * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder processError(
        Consumer<ServiceBusErrorContext> processError) {
        this.processError = processError;
        return this;
    }

    /**
     * Max concurrent messages that this processor should process.
     *
     * @param maxConcurrentCalls max concurrent messages that this processor should process.
     *
     * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
     */
    public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
        if (maxConcurrentCalls < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
        }
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
     * is completed, and a message that the processing callback throws on is abandoned.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
        sessionReceiverClientBuilder.disableAutoComplete();
        processorClientOptions.setDisableAutoComplete(true);
        return this;
    }

    /**
     * Creates a <b>session-aware</b> Service Bus processor responsible for reading
     * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if the queue name or topic name are not set or both of these fields are set.
     *     It is also thrown if the Service Bus connection information is not set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String) queueName()}
     *     or {@link #topicName(String) topicName()}.
     * @throws NullPointerException if the {@link #processMessage(Consumer)} or {@link #processError(Consumer)}
     *     callbacks are not set.
     */
    public ServiceBusProcessorClient buildProcessorClient() {
        return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
            sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
            sessionReceiverClientBuilder.subscriptionName,
            Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
            Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
    }
}

/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from a <b>session aware</b> Service Bus entity.
* * @see ServiceBusReceiverAsyncClient * @see ServiceBusReceiverClient */ @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class}) public final class ServiceBusSessionReceiverClientBuilder { private boolean enableAutoComplete = true; private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private SubQueue subQueue = SubQueue.NONE; private ServiceBusSessionReceiverClientBuilder() { } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceiverAsyncClient * the message is processed, it is {@link ServiceBusReceiverAsyncClient * abandoned}. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode * mode, auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. 
* * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
*/ public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a * secondary sub-queue, called a dead-letter queue (DLQ). * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see * @see SubQueue */ public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) { this.subQueue = subQueue; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, subQueue); if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or * subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverClient buildClient() { final boolean isPrefetchDisabled = prefetchCount == 0; return new ServiceBusSessionReceiverClient(buildAsyncClient(false), isPrefetchDisabled, MessageUtils.getTotalTimeout(retryOptions)); } private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, SubQueue.NONE); if (!isAutoCompleteAllowed && enableAutoComplete) { LOGGER.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final 
ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } } /** * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity. * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies * the message processing callback when a message is received or the error handle when an error is observed. To * create an instance, therefore, configuring the two callbacks - {@link * {@link * with auto-completion and auto-lock renewal capabilities. * * <p><strong>Sample code to instantiate a processor client</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient * <pre> * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; & * ServiceBusReceivedMessage message = context.getMessage& * System.out.printf& * message.getSequenceNumber& * & * * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; & * System.out.printf& * context.getFullyQualifiedNamespace& * * if & * ServiceBusException exception = & * System.out.printf& * exception.getReason& * & * System.out.printf& * & * & * * & * * ServiceBusProcessorClient processor = new ServiceBusClientBuilder& * .connectionString& * .processor& * .queueName& * .processMessage& * .processError& * .buildProcessorClient& * * & * processor.start& * </pre> * <!-- end com.azure.messaging.servicebus.servicebusprocessorclient * * @see ServiceBusProcessorClient */ public final class ServiceBusProcessorClientBuilder { private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder; private final ServiceBusProcessorClientOptions processorClientOptions; 
private Consumer<ServiceBusReceivedMessageContext> processMessage; private Consumer<ServiceBusErrorContext> processError; private ServiceBusProcessorClientBuilder() { serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder(); processorClientOptions = new ServiceBusProcessorClientOptions() .setMaxConcurrentCalls(1) .setTracerProvider(tracerProvider); } /** * Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application starts the processor. * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) { serviceBusReceiverClientBuilder.prefetchCount(prefetchCount); return this; } /** * Sets the name of the queue to create a processor for. * @param queueName Name of the queue. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder queueName(String queueName) { serviceBusReceiverClientBuilder.queueName(queueName); return this; } /** * Sets the receive mode for the processor. * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { serviceBusReceiverClientBuilder.receiveMode(receiveMode); return this; } /** * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a * secondary sub-queue, called a dead-letter queue (DLQ). * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. 
* @see * @see SubQueue */ public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) { serviceBusReceiverClientBuilder.subQueue(subQueue); return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. * @see */ public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) { serviceBusReceiverClientBuilder.subscriptionName(subscriptionName); return this; } /** * Sets the name of the topic. <b>{@link * @param topicName Name of the topic. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. * @see */ public ServiceBusProcessorClientBuilder topicName(String topicName) { serviceBusReceiverClientBuilder.topicName(topicName); return this; } /** * The message processing callback for the processor which will be executed when a message is received. * @param processMessage The message processing consumer that will be executed when a message is received. * * @return The updated {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder processMessage( Consumer<ServiceBusReceivedMessageContext> processMessage) { this.processMessage = processMessage; return this; } /** * The error handler for the processor which will be invoked in the event of an error while receiving messages. * @param processError The error handler which will be executed when an error occurs. * * @return The updated {@link ServiceBusProcessorClientBuilder} object */ public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) { this.processError = processError; return this; } /** * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration * disables auto-renewal. For {@link ServiceBusReceiveMode * auto-renewal is disabled. 
* * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration * or {@code null} indicates that auto-renewal is disabled. * * @return The updated {@link ServiceBusProcessorClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration); return this; } /** * Max concurrent messages that this processor should process. By default, this is set to 1. * * @param maxConcurrentCalls max concurrent messages that this processor should process. * @return The updated {@link ServiceBusProcessorClientBuilder} object. * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1. */ public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) { if (maxConcurrentCalls < 1) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1")); } processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls); return this; } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceivedMessageContext * the message is processed, it is {@link ServiceBusReceivedMessageContext * abandoned}. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder disableAutoComplete() { serviceBusReceiverClientBuilder.disableAutoComplete(); processorClientOptions.setDisableAutoComplete(true); return this; } /** * Creates Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage * messages} from a specific queue or subscription. 
* * @return An new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws NullPointerException if the {@link * callbacks are not set. */ public ServiceBusProcessorClient buildProcessorClient() { return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder, serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName, serviceBusReceiverClientBuilder.subscriptionName, Objects.requireNonNull(processMessage, "'processMessage' cannot be null"), Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions); } } /** * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume * messages from Service Bus. * * @see ServiceBusReceiverAsyncClient * @see ServiceBusReceiverClient */ @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class}) public final class ServiceBusReceiverClientBuilder { private boolean enableAutoComplete = true; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private SubQueue subQueue; private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusReceiverClientBuilder() { } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceiverAsyncClient * the message is processed, it is {@link ServiceBusReceiverAsyncClient * abandoned}. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. 
*/ public ServiceBusReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration * disables auto-renewal. For {@link ServiceBusReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration * or {@code null} indicates that auto-renewal is disabled. * * @return The updated {@link ServiceBusReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. */ public ServiceBusReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. 
* * @return The modified {@link ServiceBusReceiverClientBuilder} object. */ public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the type of the {@link SubQueue} to connect to. * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) { this.subQueue = subQueue; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage * messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages} * from a specific queue or subscription. 
* * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { final boolean isPrefetchDisabled = prefetchCount == 0; return new ServiceBusReceiverClient(buildAsyncClient(false), isPrefetchDisabled, MessageUtils.getTotalTimeout(retryOptions)); } ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, subQueue); if (!isAutoCompleteAllowed && enableAutoComplete) { LOGGER.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } } private void validateAndThrow(int prefetchCount) { if (prefetchCount < 0) { 
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format( "prefetchCount (%s) cannot be less than 0.", prefetchCount))); } } private void validateAndThrow(Duration maxLockRenewalDuration) { if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } } }
class ServiceBusClientBuilder implements TokenCredentialTrait<ServiceBusClientBuilder>,
    AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>, ConnectionStringTrait<ServiceBusClientBuilder>,
    AzureSasCredentialTrait<ServiceBusClientBuilder>, AmqpTrait<ServiceBusClientBuilder>,
    ConfigurationTrait<ServiceBusClientBuilder> {
    // Default retry used when the caller does not supply AmqpRetryOptions.
    private static final AmqpRetryOptions DEFAULT_RETRY
        = new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);

    // Properties file shipped with the library; supplies product name/version for telemetry.
    private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
    // Entity path format for a topic subscription: "<topic>/subscriptions/<subscription>".
    private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
    // Suffixes appended to an entity path to address its sub-queues.
    private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
    private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";

    private static final int DEFAULT_PREFETCH_COUNT = 0;
    private static final String NAME_KEY = "name";
    private static final String VERSION_KEY = "version";
    private static final String UNKNOWN = "UNKNOWN";
    // Matches "host:port" so a proxy address can be split into its two parts.
    private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
    private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);

    // Guards lazy creation/disposal of the shared connection (see getOrCreateConnectionProcessor).
    private final Object connectionLock = new Object();
    private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
    private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));

    // Mutable builder state; populated by the fluent setters below.
    private ClientOptions clientOptions;
    private Configuration configuration;
    private ServiceBusConnectionProcessor sharedConnection;
    private String connectionStringEntityName;
    private TokenCredential credentials;
    private String fullyQualifiedNamespace;
    private ProxyOptions proxyOptions;
    private AmqpRetryOptions retryOptions;
    private Scheduler scheduler;
    private AmqpTransportType transport = AmqpTransportType.AMQP;
    private SslDomain.VerifyMode verifyMode;
    private boolean crossEntityTransactions;
    private URL customEndpointAddress;

    /**
     * Keeps track of the open clients that were created from this builder when there is a shared connection.
     */
    private final AtomicInteger openClients = new AtomicInteger();

    /**
     * Creates a new instance with the default transport {@link AmqpTransportType#AMQP}.
     */
    public ServiceBusClientBuilder() {
    }

    /**
     * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization
     * of certain properties, as well as support the addition of custom header information. Refer to the
     * {@link ClientOptions} documentation for more information.
     *
     * @param clientOptions to be set on the client.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the fully-qualified namespace for the Service Bus.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return this;
    }

    // Re-validates at build time: the namespace may never have been set at all.
    private String getAndValidateFullyQualifiedNamespace() {
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return fullyQualifiedNamespace;
    }

    /**
     * Sets a custom endpoint address when connecting to the Service Bus service.
This can be useful when your network * does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through * an intermediary. For example: {@literal https: * <p> * If no port is specified, the default port for the {@link * used. * * @param customEndpointAddress The custom endpoint address. * @return The updated {@link ServiceBusClientBuilder} object. * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}. */ /** * Sets the connection string for a Service Bus namespace or a specific Service Bus resource. * * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder connectionString(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = getTokenCredential(properties); } catch (Exception e) { throw LOGGER.logExceptionAsError( new AzureException("Could not create the ServiceBusSharedKeyCredential.", e)); } this.fullyQualifiedNamespace = properties.getEndpoint().getHost(); String entityPath = properties.getEntityPath(); if (!CoreUtils.isNullOrEmpty(entityPath)) { LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Setting entity from connection string."); this.connectionStringEntityName = entityPath; } return credential(properties.getEndpoint().getHost(), tokenCredential); } /** * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through * one 'send-via' entity on server side as explained next. 
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform * their first operation need to either be senders, or if they are receivers they need to be on the same entity as * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure * that the transaction is committed because it cannot route a receive operation through a different entity). For * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you * first send to entity A, and then attempted to receive from entity B, an exception would be thrown. * * <p><strong>Avoid using non-transaction API on this client</strong></p> * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API. * * <p><strong>When not to enable this feature</strong></p> * If your transaction is involved in one Service bus entity only. For example you are receiving from one * queue/subscription and you want to settle your own messages which are part of one transaction. * * @return The updated {@link ServiceBusSenderClientBuilder} object. 
* * @see <a href="https: */ public ServiceBusClientBuilder enableCrossEntityTransactions() { this.crossEntityTransactions = true; return this; } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY); } else { tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure Service Bus clients. Use {@link * Configuration * * @param configuration The configuration store used to configure Service Bus clients. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the credential by using a {@link TokenCredential} for the Service Bus resource. * <a href="https: * azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate * the access to the Service Bus resource. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * @param credential The token credential to use for authentication. Access controls may be specified by the * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration. * * @return The updated {@link ServiceBusClientBuilder} object. 
*/
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return this;
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK
     * for Java identity documentation for more details on proper usage of the {@link TokenCredential} type.
     *
     * @param credential The token credential to use for authentication. Access controls may be specified by
     *     the ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder credential(TokenCredential credential) {
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Sets the credential with the shared access policies for the Service Bus resource.
     * You can find the shared access policies on the azure portal or Azure CLI.
     * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and
     * 'Secondary Key'. The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal
     * and the 'key' attribute can be either 'Primary Key' or 'Secondary Key'.
     * This method and {@link #connectionString(String)} take the same information in different forms, but
     * this method allows you to update the name and key.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace,
        AzureNamedKeyCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        // Named key is wrapped in the library's shared-key credential with the default token validity.
        this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
        return this;
    }

    /**
     * Sets the credential with the shared access policies for the Service Bus resource.
     * See the overload taking a fully-qualified namespace for details on where to find the policy name/key.
     *
     * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
        return this;
    }

    /**
     * Sets the credential with Shared Access Signature for the Service Bus resource.
     * Refer to "Service Bus access control with Shared Access Signatures" in the Azure documentation.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential {@link AzureSasCredential} to be used for authentication.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
        return this;
    }

    /**
     * Sets the credential with Shared Access Signature for the Service Bus resource.
     * Refer to "Service Bus access control with Shared Access Signatures" in the Azure documentation.
     *
     * @param credential {@link AzureSasCredential} to be used for authentication.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder credential(AzureSasCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
        return this;
    }

    /**
     * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured,
     * {@link AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
     *
     * @param proxyOptions The proxy configuration to use.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Package-private method that sets the verify mode for this connection.
     *
     * @param verifyMode The verification mode.
     * @return The updated {@link ServiceBusClientBuilder} object.
*/
    ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
        this.verifyMode = verifyMode;
        return this;
    }

    /**
     * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
     *
     * @param retryOptions The retry options to use.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the scheduler to use.
     *
     * @param scheduler Scheduler to be used.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    ServiceBusClientBuilder scheduler(Scheduler scheduler) {
        this.scheduler = scheduler;
        return this;
    }

    /**
     * Sets the transport type by which all the communication with Azure Service Bus occurs.
     * Default value is {@link AmqpTransportType#AMQP}.
     *
     * @param transportType The transport type to use.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
        this.transport = transportType;
        return this;
    }

    /**
     * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
     *
     * @return A new instance of {@link ServiceBusSenderClientBuilder}.
     */
    public ServiceBusSenderClientBuilder sender() {
        return new ServiceBusSenderClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message
     * receivers.
     *
     * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
     */
    public ServiceBusReceiverClientBuilder receiver() {
        return new ServiceBusReceiverClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b>
     * Service Bus message receivers.
     *
     * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
     */
    public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
        return new ServiceBusSessionReceiverClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure
     * {@link ServiceBusProcessorClient} instance.
     *
     * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
     */
    public ServiceBusProcessorClientBuilder processor() {
        return new ServiceBusProcessorClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus
     * processor instance that processes sessions.
     *
     * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
     */
    public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
        return new ServiceBusSessionProcessorClientBuilder();
    }

    /**
     * Called when a child client is closed. Disposes of the shared connection if there are no more clients.
     */
    void onClientClose() {
        synchronized (connectionLock) {
            final int numberOfOpenClients = openClients.decrementAndGet();
            LOGGER.atInfo()
                .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                .log("Closing a dependent client.");

            if (numberOfOpenClients > 0) {
                return;
            }

            // Count went below zero: more closes than opens were observed; log but continue the dispose.
            if (numberOfOpenClients < 0) {
                LOGGER.atWarning()
                    .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                    .log("There should not be less than 0 clients.");
            }

            LOGGER.info("No more open clients, closing shared connection.");
            if (sharedConnection != null) {
                sharedConnection.dispose();
                sharedConnection = null;
            } else {
                LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
            }
        }
    }

    // Lazily creates the shared AMQP connection processor; all clients built from this builder share it.
    private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
        if (retryOptions == null) {
            retryOptions = DEFAULT_RETRY;
        }
        if (scheduler == null) {
            // NOTE(review): Schedulers.elastic() is deprecated in recent Reactor releases in favor of
            // Schedulers.boundedElastic(); left unchanged here to preserve behavior — confirm before migrating.
            scheduler = Schedulers.elastic();
        }
        synchronized (connectionLock) {
            if (sharedConnection == null) {
                final ConnectionOptions connectionOptions = getConnectionOptions();
                // repeat() (applied where the flux is completed below) re-creates the connection when the
                // previous one terminates.
                final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() ->
{ final String connectionId = StringUtil.getRandomString("MF"); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()); return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer, crossEntityTransactions); }).repeat(); sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); } } final int numberOfOpenClients = openClients.incrementAndGet(); LOGGER.info(" return sharedConnection; } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "or credentials(String, String, TokenCredential)" )); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP.")); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; final SslDomain.VerifyMode verificationMode = verifyMode != null ? 
verifyMode : SslDomain.VerifyMode.VERIFY_PEER_NAME; final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions(); final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); if (customEndpointAddress == null) { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion); } else { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion, customEndpointAddress.getHost(), customEndpointAddress.getPort()); } } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress, configuration, Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"))); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress, Configuration configuration, boolean useSystemProxies) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = 
configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else if (useSystemProxies) { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } else { LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't " + "set or was false."); return ProxyOptions.SYSTEM_DEFAULTS; } } private static boolean isNullOrEmpty(String item) { return item == null || item.isEmpty(); } private static MessagingEntityType validateEntityPaths(String connectionStringEntityName, String topicName, String queueName) { final boolean hasTopicName = !isNullOrEmpty(topicName); final boolean hasQueueName = !isNullOrEmpty(queueName); final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName); final MessagingEntityType entityType; if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException( "Cannot build client without setting either a queueName or topicName.")); } else if (hasQueueName && hasTopicName) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName))); } else if (hasQueueName) { if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "queueName (%s) is different than the connectionString's EntityPath (%s).", queueName, connectionStringEntityName))); } entityType = MessagingEntityType.QUEUE; } else if 
(hasTopicName) { if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "topicName (%s) is different than the connectionString's EntityPath (%s).", topicName, connectionStringEntityName))); } entityType = MessagingEntityType.SUBSCRIPTION; } else { entityType = MessagingEntityType.UNKNOWN; } return entityType; } private static String getEntityPath(MessagingEntityType entityType, String queueName, String topicName, String subscriptionName, SubQueue subQueue) { String entityPath; switch (entityType) { case QUEUE: entityPath = queueName; break; case SUBSCRIPTION: if (isNullOrEmpty(subscriptionName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "topicName (%s) must have a subscriptionName associated with it.", topicName))); } entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName, subscriptionName); break; default: throw ServiceBusClientBuilder.LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } if (subQueue == null) { return entityPath; } switch (subQueue) { case NONE: break; case TRANSFER_DEAD_LETTER_QUEUE: entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX; break; case DEAD_LETTER_QUEUE: entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX; break; default: throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: " + subQueue)); } return entityPath; } /** * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages * to Service Bus. 
*
     * @see ServiceBusSenderAsyncClient
     * @see ServiceBusSenderClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
    public final class ServiceBusSenderClientBuilder {
        // Exactly one of these should be set; validateEntityPaths enforces that at build time.
        private String queueName;
        private String topicName;

        private ServiceBusSenderClientBuilder() {
        }

        /**
         * Sets the name of the Service Bus queue to publish messages to.
         *
         * @param queueName Name of the queue.
         * @return The modified {@link ServiceBusSenderClientBuilder} object.
         */
        public ServiceBusSenderClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }

        /**
         * Sets the name of the Service Bus topic to publish messages to.
         *
         * @param topicName Name of the topic.
         * @return The modified {@link ServiceBusSenderClientBuilder} object.
         */
        public ServiceBusSenderClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }

        /**
         * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting
         * {@link ServiceBusMessage} to a Service Bus queue or topic.
         *
         * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the entity type is not a queue or a topic.
         */
        public ServiceBusSenderAsyncClient buildAsyncClient() {
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(
                messageSerializer);
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);

            final String entityName;
            switch (entityType) {
                case QUEUE:
                    entityName = queueName;
                    break;
                case SUBSCRIPTION:
                    entityName = topicName;
                    break;
                case UNKNOWN:
                    // Entity name came only from the connection string's EntityPath.
                    entityName = connectionStringEntityName;
                    break;
                default:
                    throw LOGGER.logExceptionAsError(
                        new IllegalArgumentException("Unknown entity type: " + entityType));
            }

            return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
        }

        /**
         * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting
         * {@link ServiceBusMessage} to a Service Bus queue or topic.
         *
         * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the entity type is not a queue or a topic.
         */
        public ServiceBusSenderClient buildClient() {
            return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
        }
    }

    /**
     * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service
     * Bus entity. {@link ServiceBusProcessorClient} processes messages and errors via the configured
     * message and error callbacks, and moves on to the next session to process.
     *
     * <p>
     * By default, the processor:
     * <ul>
     * <li>Automatically settles messages. Disabled via {@code disableAutoComplete()}</li>
     * <li>Processes 1 session concurrently.
Configured via {@code maxConcurrentSessions(int)}</li>
     * <li>Invokes 1 instance of the message callback at a time. Configured via
     * {@code maxConcurrentCalls(int)}</li>
     * </ul>
     *
     * <p><strong>Instantiate a session-enabled processor client</strong></p>
     * <pre>{@code
     * ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder()
     *     .connectionString(connectionString)
     *     .sessionProcessor()
     *     .queueName(queueName)
     *     .maxConcurrentSessions(2)
     *     .processMessage(onMessage)
     *     .processError(onError)
     *     .buildProcessorClient();
     *
     * sessionProcessor.start();
     * }</pre>
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusSessionProcessorClientBuilder {
        private final ServiceBusProcessorClientOptions processorClientOptions;
        // All receive-related settings delegate to a nested session-receiver builder.
        private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;

        private ServiceBusSessionProcessorClientBuilder() {
            sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
            // Defaults: one concurrent call, one concurrent session.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
            sessionReceiverClientBuilder.maxConcurrentSessions(1);
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO}
         * or {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(
            Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
            }
            sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
            return this;
        }

        /**
         * Sets the prefetch count of the processor, applicable to both PEEK_LOCK and RECEIVE_AND_DELETE modes.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local
         * retrieval when and before the application starts the processor.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns
         * prefetch off. Using a non-zero prefetch risks losing messages even though it has better performance.
         *
         * @param prefetchCount The prefetch count.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
            sessionReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
            sessionReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            sessionReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions
         * provide a secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
            this.sessionReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to. <b>The topic name must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
            sessionReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic. <b>The subscription name must also be set.</b>
         *
         * @param topicName Name of the topic.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
            sessionReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor that will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is
         *     received.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object
         */
        public ServiceBusSessionProcessorClientBuilder processError(
            Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Max concurrent messages that this processor should process.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed after the {@link ServiceBusReceivedMessageContext message context} callback
         * returns; if an error occurs while the message is processed, it is
         * {@link ServiceBusReceivedMessageContext abandoned}.
* * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder disableAutoComplete() { sessionReceiverClientBuilder.disableAutoComplete(); processorClientOptions.setDisableAutoComplete(true); return this; } /** * Creates a <b>session-aware</b> Service Bus processor responsible for reading * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws NullPointerException if the {@link * callbacks are not set. */ public ServiceBusProcessorClient buildProcessorClient() { return new ServiceBusProcessorClient(sessionReceiverClientBuilder, sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName, sessionReceiverClientBuilder.subscriptionName, Objects.requireNonNull(processMessage, "'processMessage' cannot be null"), Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions); } } /** * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume * messages from a <b>session aware</b> Service Bus entity. 
* * @see ServiceBusReceiverAsyncClient * @see ServiceBusReceiverClient */ @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class}) public final class ServiceBusSessionReceiverClientBuilder { private boolean enableAutoComplete = true; private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private SubQueue subQueue = SubQueue.NONE; private ServiceBusSessionReceiverClientBuilder() { } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceiverAsyncClient * the message is processed, it is {@link ServiceBusReceiverAsyncClient * abandoned}. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode * mode, auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. 
* * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
*/ public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a * secondary sub-queue, called a dead-letter queue (DLQ). * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see * @see SubQueue */ public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) { this.subQueue = subQueue; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, subQueue); if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or * subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverClient buildClient() { final boolean isPrefetchDisabled = prefetchCount == 0; return new ServiceBusSessionReceiverClient(buildAsyncClient(false), isPrefetchDisabled, MessageUtils.getTotalTimeout(retryOptions)); } private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, SubQueue.NONE); if (!isAutoCompleteAllowed && enableAutoComplete) { LOGGER.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final 
ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } } /** * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity. * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies * the message processing callback when a message is received or the error handle when an error is observed. To * create an instance, therefore, configuring the two callbacks - {@link * {@link * with auto-completion and auto-lock renewal capabilities. * * <p><strong>Sample code to instantiate a processor client</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient * <pre> * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; & * ServiceBusReceivedMessage message = context.getMessage& * System.out.printf& * message.getSequenceNumber& * & * * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; & * System.out.printf& * context.getFullyQualifiedNamespace& * * if & * ServiceBusException exception = & * System.out.printf& * exception.getReason& * & * System.out.printf& * & * & * * & * * ServiceBusProcessorClient processor = new ServiceBusClientBuilder& * .connectionString& * .processor& * .queueName& * .processMessage& * .processError& * .buildProcessorClient& * * & * processor.start& * </pre> * <!-- end com.azure.messaging.servicebus.servicebusprocessorclient * * @see ServiceBusProcessorClient */ public final class ServiceBusProcessorClientBuilder { private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder; private final ServiceBusProcessorClientOptions processorClientOptions; 
private Consumer<ServiceBusReceivedMessageContext> processMessage; private Consumer<ServiceBusErrorContext> processError; private ServiceBusProcessorClientBuilder() { serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder(); processorClientOptions = new ServiceBusProcessorClientOptions() .setMaxConcurrentCalls(1) .setTracerProvider(tracerProvider); } /** * Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application starts the processor. * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) { serviceBusReceiverClientBuilder.prefetchCount(prefetchCount); return this; } /** * Sets the name of the queue to create a processor for. * @param queueName Name of the queue. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder queueName(String queueName) { serviceBusReceiverClientBuilder.queueName(queueName); return this; } /** * Sets the receive mode for the processor. * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { serviceBusReceiverClientBuilder.receiveMode(receiveMode); return this; } /** * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a * secondary sub-queue, called a dead-letter queue (DLQ). * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. 
* @see * @see SubQueue */ public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) { serviceBusReceiverClientBuilder.subQueue(subQueue); return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. * @see */ public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) { serviceBusReceiverClientBuilder.subscriptionName(subscriptionName); return this; } /** * Sets the name of the topic. <b>{@link * @param topicName Name of the topic. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. * @see */ public ServiceBusProcessorClientBuilder topicName(String topicName) { serviceBusReceiverClientBuilder.topicName(topicName); return this; } /** * The message processing callback for the processor which will be executed when a message is received. * @param processMessage The message processing consumer that will be executed when a message is received. * * @return The updated {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder processMessage( Consumer<ServiceBusReceivedMessageContext> processMessage) { this.processMessage = processMessage; return this; } /** * The error handler for the processor which will be invoked in the event of an error while receiving messages. * @param processError The error handler which will be executed when an error occurs. * * @return The updated {@link ServiceBusProcessorClientBuilder} object */ public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) { this.processError = processError; return this; } /** * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration * disables auto-renewal. For {@link ServiceBusReceiveMode * auto-renewal is disabled. 
* * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration * or {@code null} indicates that auto-renewal is disabled. * * @return The updated {@link ServiceBusProcessorClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration); return this; } /** * Max concurrent messages that this processor should process. By default, this is set to 1. * * @param maxConcurrentCalls max concurrent messages that this processor should process. * @return The updated {@link ServiceBusProcessorClientBuilder} object. * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1. */ public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) { if (maxConcurrentCalls < 1) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1")); } processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls); return this; } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceivedMessageContext * the message is processed, it is {@link ServiceBusReceivedMessageContext * abandoned}. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. */ public ServiceBusProcessorClientBuilder disableAutoComplete() { serviceBusReceiverClientBuilder.disableAutoComplete(); processorClientOptions.setDisableAutoComplete(true); return this; } /** * Creates Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage * messages} from a specific queue or subscription. 
* * @return An new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws NullPointerException if the {@link * callbacks are not set. */ public ServiceBusProcessorClient buildProcessorClient() { return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder, serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName, serviceBusReceiverClientBuilder.subscriptionName, Objects.requireNonNull(processMessage, "'processMessage' cannot be null"), Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions); } } /** * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume * messages from Service Bus. * * @see ServiceBusReceiverAsyncClient * @see ServiceBusReceiverClient */ @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class}) public final class ServiceBusReceiverClientBuilder { private boolean enableAutoComplete = true; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private SubQueue subQueue; private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusReceiverClientBuilder() { } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceiverAsyncClient * the message is processed, it is {@link ServiceBusReceiverAsyncClient * abandoned}. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. 
*/ public ServiceBusReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration * disables auto-renewal. For {@link ServiceBusReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration * or {@code null} indicates that auto-renewal is disabled. * * @return The updated {@link ServiceBusReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. */ public ServiceBusReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. 
* * @return The modified {@link ServiceBusReceiverClientBuilder} object. */ public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the type of the {@link SubQueue} to connect to. * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) { this.subQueue = subQueue; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage * messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages} * from a specific queue or subscription. 
* * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { final boolean isPrefetchDisabled = prefetchCount == 0; return new ServiceBusReceiverClient(buildAsyncClient(false), isPrefetchDisabled, MessageUtils.getTotalTimeout(retryOptions)); } ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, subQueue); if (!isAutoCompleteAllowed && enableAutoComplete) { LOGGER.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } } private void validateAndThrow(int prefetchCount) { if (prefetchCount < 0) { 
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format( "prefetchCount (%s) cannot be less than 0.", prefetchCount))); } } private void validateAndThrow(Duration maxLockRenewalDuration) { if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } } }
For Event Hubs, it seems we support setting a custom endpoint at the processor level as well (in addition to EventHubsClientBuilder) — https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/eventhubs/azure-messaging-eventhubs/CHANGELOG.md#550-2020-02-15. Should we check whether the same applies to the Service Bus processor as well?
/**
 * Sets a custom endpoint address when connecting to the Service Bus service. The address is parsed
 * eagerly, so an invalid value fails here rather than at connection time.
 *
 * @param customEndpointAddress The custom endpoint address to use; {@code null} clears any
 *     previously configured custom endpoint so the default service endpoint is used.
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws IllegalArgumentException If {@code customEndpointAddress} cannot be parsed as a valid
 *     {@link URL}.
 */
public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
    // null explicitly resets the custom endpoint; short-circuit before URL parsing.
    if (customEndpointAddress == null) {
        this.customEndpointAddress = null;
        return this;
    }
    try {
        this.customEndpointAddress = new URL(customEndpointAddress);
    } catch (MalformedURLException e) {
        // Include the offending input in the message and log through the client logger
        // so misconfiguration is visible at the call site and in the logs.
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
    }
    return this;
}
if (customEndpointAddress == null) {
/**
 * Sets a custom endpoint address when connecting to the Service Bus service. Passing {@code null}
 * removes any previously configured custom endpoint.
 *
 * @param customEndpointAddress The custom endpoint address, or {@code null} to clear it.
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws IllegalArgumentException If {@code customEndpointAddress} is not a well-formed URL.
 */
public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
    // Guard clause: a null argument clears the configured endpoint.
    if (customEndpointAddress == null) {
        this.customEndpointAddress = null;
        return this;
    }

    // Validate by parsing into a local first; only assign on success.
    final URL parsedEndpoint;
    try {
        parsedEndpoint = new URL(customEndpointAddress);
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
    }

    this.customEndpointAddress = parsedEndpoint;
    return this;
}
class ServiceBusClientBuilder implements TokenCredentialTrait<ServiceBusClientBuilder>, AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>, ConnectionStringTrait<ServiceBusClientBuilder>, AzureSasCredentialTrait<ServiceBusClientBuilder>, AmqpTrait<ServiceBusClientBuilder>, ConfigurationTrait<ServiceBusClientBuilder> { private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT); private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties"; private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s"; private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue"; private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue"; private static final int DEFAULT_PREFETCH_COUNT = 0; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class); private final Object connectionLock = new Object(); private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer(); private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); private ClientOptions clientOptions; private Configuration configuration; private ServiceBusConnectionProcessor sharedConnection; private String connectionStringEntityName; private TokenCredential credentials; private String fullyQualifiedNamespace; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport = AmqpTransportType.AMQP; private SslDomain.VerifyMode verifyMode; private boolean 
crossEntityTransactions; private URL customEndpointAddress; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. */ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType */ public ServiceBusClientBuilder() { } /** * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of * certain properties, as well as support the addition of custom header information. Refer to the {@link * ClientOptions} documentation for more information. * * @param clientOptions to be set on the client. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the fully-qualified namespace for the Service Bus. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return this; } private String getAndValidateFullyQualifiedNamespace() { if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return fullyQualifiedNamespace; } /** * Sets a custom endpoint address when connecting to the Service Bus service. 
This can be useful when your network * does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through * an intermediary. For example: {@literal https: * <p> * If no port is specified, the default port for the {@link * used. * * @param customEndpointAddress The custom endpoint address. * @return The updated {@link ServiceBusClientBuilder} object. * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}. */ /** * Sets the connection string for a Service Bus namespace or a specific Service Bus resource. * * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder connectionString(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = getTokenCredential(properties); } catch (Exception e) { throw LOGGER.logExceptionAsError( new AzureException("Could not create the ServiceBusSharedKeyCredential.", e)); } this.fullyQualifiedNamespace = properties.getEndpoint().getHost(); String entityPath = properties.getEntityPath(); if (!CoreUtils.isNullOrEmpty(entityPath)) { LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Setting entity from connection string."); this.connectionStringEntityName = entityPath; } return credential(properties.getEndpoint().getHost(), tokenCredential); } /** * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through * one 'send-via' entity on server side as explained next. 
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform * their first operation need to either be senders, or if they are receivers they need to be on the same entity as * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure * that the transaction is committed because it cannot route a receive operation through a different entity). For * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you * first send to entity A, and then attempted to receive from entity B, an exception would be thrown. * * <p><strong>Avoid using non-transaction API on this client</strong></p> * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API. * * <p><strong>When not to enable this feature</strong></p> * If your transaction is involved in one Service bus entity only. For example you are receiving from one * queue/subscription and you want to settle your own messages which are part of one transaction. * * @return The updated {@link ServiceBusSenderClientBuilder} object. 
* * @see <a href="https: */ public ServiceBusClientBuilder enableCrossEntityTransactions() { this.crossEntityTransactions = true; return this; } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY); } else { tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure Service Bus clients. Use {@link * Configuration * * @param configuration The configuration store used to configure Service Bus clients. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the credential by using a {@link TokenCredential} for the Service Bus resource. * <a href="https: * azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate * the access to the Service Bus resource. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * @param credential The token credential to use for authentication. Access controls may be specified by the * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration. * * @return The updated {@link ServiceBusClientBuilder} object. 
*/ public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential The token credential to use for authentication. Access controls may be specified by the * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder credential(TokenCredential credential) { this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Sets the credential with the shared access policies for the Service Bus resource. * You can find the shared access policies on the azure portal or Azure CLI. * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'. * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute * can be either 'Primary Key' or 'Secondary Key'. * This method and {@link * you to update the name and key. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. * @param credential {@link AzureNamedKeyCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. 
*/ public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); Objects.requireNonNull(credential, "'credential' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(), credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY); return this; } /** * Sets the credential with the shared access policies for the Service Bus resource. * You can find the shared access policies on the azure portal or Azure CLI. * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'. * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute * can be either 'Primary Key' or 'Secondary Key'. * This method and {@link * you to update the name and key. * * @param credential {@link AzureNamedKeyCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(), credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY); return this; } /** * Sets the credential with Shared Access Signature for the Service Bus resource. * Refer to <a href="https: * Service Bus access control with Shared Access Signatures</a>. * * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus. 
* @param credential {@link AzureSasCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. */ public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); Objects.requireNonNull(credential, "'credential' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature()); return this; } /** * Sets the credential with Shared Access Signature for the Service Bus resource. * Refer to <a href="https: * Service Bus access control with Shared Access Signatures</a>. * * @param credential {@link AzureSasCredential} to be used for authentication. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder credential(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature()); return this; } /** * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Package-private method that sets the verify mode for this connection. * * @param verifyMode The verification mode. * @return The updated {@link ServiceBusClientBuilder} object. 
*/ ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) { this.verifyMode = verifyMode; return this; } /** * Sets the retry options for Service Bus clients. If not specified, the default retry options are used. * * @param retryOptions The retry options to use. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the scheduler to use. * * @param scheduler Scheduler to be used. * * @return The updated {@link ServiceBusClientBuilder} object. */ ServiceBusClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link * AmqpTransportType * * @param transportType The transport type to use. * * @return The updated {@link ServiceBusClientBuilder} object. */ @Override public ServiceBusClientBuilder transportType(AmqpTransportType transportType) { this.transport = transportType; return this; } /** * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders. * * @return A new instance of {@link ServiceBusSenderClientBuilder}. */ public ServiceBusSenderClientBuilder sender() { return new ServiceBusSenderClientBuilder(); } /** * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers. * * @return A new instance of {@link ServiceBusReceiverClientBuilder}. */ public ServiceBusReceiverClientBuilder receiver() { return new ServiceBusReceiverClientBuilder(); } /** * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service * Bus message receivers. * * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}. 
*/ public ServiceBusSessionReceiverClientBuilder sessionReceiver() { return new ServiceBusSessionReceiverClientBuilder(); } /** * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient} * instance. * * @return A new instance of {@link ServiceBusProcessorClientBuilder}. */ public ServiceBusProcessorClientBuilder processor() { return new ServiceBusProcessorClientBuilder(); } /** * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor * instance that processes sessions. * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}. */ public ServiceBusSessionProcessorClientBuilder sessionProcessor() { return new ServiceBusSessionProcessorClientBuilder(); } /** * Called when a child client is closed. Disposes of the shared connection if there are no more clients. */ void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); LOGGER.atInfo() .addKeyValue("numberOfOpenClients", numberOfOpenClients) .log("Closing a dependent client."); if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { LOGGER.atWarning() .addKeyValue("numberOfOpenClients", numberOfOpenClients) .log("There should not be less than 0 clients."); } LOGGER.info("No more open clients, closing shared connection."); if (sharedConnection != null) { sharedConnection.dispose(); sharedConnection = null; } else { LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed."); } } } private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.elastic(); } synchronized (connectionLock) { if (sharedConnection == null) { final ConnectionOptions connectionOptions = getConnectionOptions(); final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> 
{ final String connectionId = StringUtil.getRandomString("MF"); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()); return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer, crossEntityTransactions); }).repeat(); sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); } } final int numberOfOpenClients = openClients.incrementAndGet(); LOGGER.info(" return sharedConnection; } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "or credentials(String, String, TokenCredential)" )); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP.")); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; final SslDomain.VerifyMode verificationMode = verifyMode != null ? 
verifyMode : SslDomain.VerifyMode.VERIFY_PEER_NAME; final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions(); final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); if (customEndpointAddress == null) { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion); } else { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion, customEndpointAddress.getHost(), customEndpointAddress.getPort()); } } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress, configuration, Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"))); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress, Configuration configuration, boolean useSystemProxies) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = 
configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else if (useSystemProxies) { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } else { LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't " + "set or was false."); return ProxyOptions.SYSTEM_DEFAULTS; } } private static boolean isNullOrEmpty(String item) { return item == null || item.isEmpty(); } private static MessagingEntityType validateEntityPaths(String connectionStringEntityName, String topicName, String queueName) { final boolean hasTopicName = !isNullOrEmpty(topicName); final boolean hasQueueName = !isNullOrEmpty(queueName); final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName); final MessagingEntityType entityType; if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException( "Cannot build client without setting either a queueName or topicName.")); } else if (hasQueueName && hasTopicName) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName))); } else if (hasQueueName) { if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "queueName (%s) is different than the connectionString's EntityPath (%s).", queueName, connectionStringEntityName))); } entityType = MessagingEntityType.QUEUE; } else if 
(hasTopicName) { if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "topicName (%s) is different than the connectionString's EntityPath (%s).", topicName, connectionStringEntityName))); } entityType = MessagingEntityType.SUBSCRIPTION; } else { entityType = MessagingEntityType.UNKNOWN; } return entityType; } private static String getEntityPath(MessagingEntityType entityType, String queueName, String topicName, String subscriptionName, SubQueue subQueue) { String entityPath; switch (entityType) { case QUEUE: entityPath = queueName; break; case SUBSCRIPTION: if (isNullOrEmpty(subscriptionName)) { throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format( "topicName (%s) must have a subscriptionName associated with it.", topicName))); } entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName, subscriptionName); break; default: throw ServiceBusClientBuilder.LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } if (subQueue == null) { return entityPath; } switch (subQueue) { case NONE: break; case TRANSFER_DEAD_LETTER_QUEUE: entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX; break; case DEAD_LETTER_QUEUE: entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX; break; default: throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: " + subQueue)); } return entityPath; } /** * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages * to Service Bus. 
* * @see ServiceBusSenderAsyncClient * @see ServiceBusSenderClient */ @ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class}) public final class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. 
*/ public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null); } /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions)); } } /** * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus * entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link * and {@link * next session to process. * * <p> * By default, the processor: * <ul> * <li>Automatically settles messages. Disabled via {@link * <li>Processes 1 session concurrently. 
     * Configured via {@link ServiceBusClientBuilder#sessionProcessor()}. By default the processor:
     * <ul>
     * <li>Invokes 1 instance of the {@link #processMessage(Consumer) processMessage} callback at a time
     *     (see {@link #maxConcurrentCalls(int)}).</li>
     * <li>Processes 1 session at a time (see {@link #maxConcurrentSessions(int)}).</li>
     * </ul>
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusSessionProcessorClientBuilder {
        // Processor-level options (concurrency, auto-complete, tracing) passed through to the built client.
        private final ServiceBusProcessorClientOptions processorClientOptions;
        // Entity/receive settings are delegated to this underlying session receiver builder.
        private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;

        private ServiceBusSessionProcessorClientBuilder() {
            sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
            // Defaults: one concurrent call, tracing from the enclosing builder, one concurrent session.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
            sessionReceiverClientBuilder.maxConcurrentSessions(1);
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. {@link Duration#ZERO} or {@code null}
         * disables auto-renewal.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
            }
            sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
            return this;
        }

        /**
         * Sets the prefetch count of the processor. Setting a non-zero value will prefetch that number of
         * messages; zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
            sessionReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
            sessionReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            sessionReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to, e.g. the dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
            this.sessionReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         *
         * @param subscriptionName Name of the subscription.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
            sessionReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic.
         *
         * @param topicName Name of the topic.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
            sessionReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor that will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processError(
            Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Max concurrent messages that this processor should process.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
            sessionReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }

        /**
         * Creates a <b>session-aware</b> Service Bus processor responsible for reading
         * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
         * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
         *     set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
                sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
                sessionReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }

    /**
     * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
     * messages from a <b>session aware</b> Service Bus entity.
* * @see ServiceBusReceiverAsyncClient * @see ServiceBusReceiverClient */ @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class}) public final class ServiceBusSessionReceiverClientBuilder { private boolean enableAutoComplete = true; private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private SubQueue subQueue = SubQueue.NONE; private ServiceBusSessionReceiverClientBuilder() { } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceiverAsyncClient * the message is processed, it is {@link ServiceBusReceiverAsyncClient * abandoned}. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode * mode, auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. 
* * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
*/ public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a * secondary sub-queue, called a dead-letter queue (DLQ). * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see * @see SubQueue */ public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) { this.subQueue = subQueue; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, subQueue); if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or * subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverClient buildClient() { final boolean isPrefetchDisabled = prefetchCount == 0; return new ServiceBusSessionReceiverClient(buildAsyncClient(false), isPrefetchDisabled, MessageUtils.getTotalTimeout(retryOptions)); } private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, SubQueue.NONE); if (!isAutoCompleteAllowed && enableAutoComplete) { LOGGER.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final 
ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } } /** * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity. * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies * the message processing callback when a message is received or the error handle when an error is observed. To * create an instance, therefore, configuring the two callbacks - {@link * {@link * with auto-completion and auto-lock renewal capabilities. * * <p><strong>Sample code to instantiate a processor client</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient * <pre> * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; & * ServiceBusReceivedMessage message = context.getMessage& * System.out.printf& * message.getSequenceNumber& * & * * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; & * System.out.printf& * context.getFullyQualifiedNamespace& * * if & * ServiceBusException exception = & * System.out.printf& * exception.getReason& * & * System.out.printf& * & * & * * & * * ServiceBusProcessorClient processor = new ServiceBusClientBuilder& * .connectionString& * .processor& * .queueName& * .processMessage& * .processError& * .buildProcessorClient& * * & * processor.start& * </pre> * <!-- end com.azure.messaging.servicebus.servicebusprocessorclient * * @see ServiceBusProcessorClient */ public final class ServiceBusProcessorClientBuilder { private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder; private final ServiceBusProcessorClientOptions processorClientOptions; 
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;

        private ServiceBusProcessorClientBuilder() {
            serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
            // Defaults: one concurrent call, tracing from the enclosing builder.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
        }

        /**
         * Sets the prefetch count of the processor. Setting a non-zero value will prefetch that number of
         * messages; zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
            serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder queueName(String queueName) {
            serviceBusReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            serviceBusReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to, e.g. the dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
            serviceBusReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         *
         * @param subscriptionName Name of the subscription.
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
            serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic.
         *
         * @param topicName Name of the topic.
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder topicName(String topicName) {
            serviceBusReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor which will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is
         *     received.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. {@link Duration#ZERO} or {@code null}
         * disables auto-renewal.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Max concurrent messages that this processor should process. By default, this is set to 1.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
         */
        public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder disableAutoComplete() {
            serviceBusReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }

        /**
         * Creates a Service Bus message processor responsible for reading
         * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
         * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
         *     set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
                serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
                serviceBusReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }

    /**
     * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
     * messages from Service Bus.
     *
     * @see ServiceBusReceiverAsyncClient
     * @see ServiceBusReceiverClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusReceiverClientBuilder {
        private boolean enableAutoComplete = true;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private SubQueue subQueue;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

        private ServiceBusReceiverClientBuilder() {
        }

        /**
         * Disables auto-complete and auto-abandon of received messages.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
*/ public ServiceBusReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration * disables auto-renewal. For {@link ServiceBusReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration * or {@code null} indicates that auto-renewal is disabled. * * @return The updated {@link ServiceBusReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode * {@link ServiceBusReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. */ public ServiceBusReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. 
* * @return The modified {@link ServiceBusReceiverClientBuilder} object. */ public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the type of the {@link SubQueue} to connect to. * * @param subQueue The type of the sub queue. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) { this.subQueue = subQueue; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusReceiverClientBuilder} object. * @see */ public ServiceBusReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage * messages} from a specific queue or subscription. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages} * from a specific queue or subscription. 
* * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { final boolean isPrefetchDisabled = prefetchCount == 0; return new ServiceBusReceiverClient(buildAsyncClient(false), isPrefetchDisabled, MessageUtils.getTotalTimeout(retryOptions)); } ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName, subQueue); if (!isAutoCompleteAllowed && enableAutoComplete) { LOGGER.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode."); enableAutoComplete = false; } if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } } private void validateAndThrow(int prefetchCount) { if (prefetchCount < 0) { 
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format( "prefetchCount (%s) cannot be less than 0.", prefetchCount))); } } private void validateAndThrow(Duration maxLockRenewalDuration) { if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } } }
class ServiceBusClientBuilder implements
    TokenCredentialTrait<ServiceBusClientBuilder>, AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
    ConnectionStringTrait<ServiceBusClientBuilder>, AzureSasCredentialTrait<ServiceBusClientBuilder>,
    AmqpTrait<ServiceBusClientBuilder>, ConfigurationTrait<ServiceBusClientBuilder> {
    // Default retry policy: standard AmqpRetryOptions with the library-wide operation timeout as try timeout.
    private static final AmqpRetryOptions DEFAULT_RETRY =
        new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);

    // Resource bundle carrying the client name/version reported to the service.
    private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
    // Entity path template for a topic subscription: "<topic>/subscriptions/<subscription>".
    private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
    private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
    private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
    private static final int DEFAULT_PREFETCH_COUNT = 0;
    private static final String NAME_KEY = "name";
    private static final String VERSION_KEY = "version";
    private static final String UNKNOWN = "UNKNOWN";
    // Matches "host:port" so an explicit proxy address can be split into its two parts.
    private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
    // NOTE(review): MAX_LOCK_RENEW_DEFAULT_DURATION and DEFAULT_PREFETCH_COUNT are not referenced in this
    // chunk — confirm they are used elsewhere in the file.
    private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);

    // Guards creation and disposal of the shared connection processor.
    private final Object connectionLock = new Object();
    private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
    private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));

    private ClientOptions clientOptions;
    private Configuration configuration;
    // Single AMQP connection shared by every client created from this builder.
    private ServiceBusConnectionProcessor sharedConnection;
    // EntityPath extracted from the connection string, when one was supplied.
    private String connectionStringEntityName;
    private TokenCredential credentials;
    private String fullyQualifiedNamespace;
    private ProxyOptions proxyOptions;
    private AmqpRetryOptions retryOptions;
    private Scheduler scheduler;
    private AmqpTransportType transport = AmqpTransportType.AMQP;
    private SslDomain.VerifyMode verifyMode;
    private boolean crossEntityTransactions;
    private URL customEndpointAddress;

    /**
     * Keeps track of the open clients that were created from this builder when there is a shared connection.
     */
    private final AtomicInteger openClients = new AtomicInteger();

    /**
     * Creates a new instance with the default transport {@link AmqpTransportType#AMQP AMQP}.
     */
    public ServiceBusClientBuilder() {
    }

    /**
     * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization
     * of certain properties, as well as support for the addition of custom header information.
     *
     * @param clientOptions to be set on the client.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the fully-qualified namespace for the Service Bus.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return this;
    }

    // Returns the configured namespace, rejecting empty values at connection-build time.
    private String getAndValidateFullyQualifiedNamespace() {
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return fullyQualifiedNamespace;
    }

    /**
     * Sets a custom endpoint address when connecting to the Service Bus service.
 This can be useful when your network
     * does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting
     * through an intermediary (for example an application gateway).
     * <p>
     * If no port is specified, the default port for the configured transport is used.
     *
     * @param customEndpointAddress The custom endpoint address.
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
     */
    // NOTE(review): the javadoc above describes a customEndpointAddress(String) setter, but no such method
    // appears at this point even though the 'customEndpointAddress' field is read later — confirm against the
    // complete file; the setter may have been lost in extraction.

    /**
     * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
     *
     * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder connectionString(String connectionString) {
        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = getTokenCredential(properties);
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(
                new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
        }
        this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
        String entityPath = properties.getEntityPath();
        if (!CoreUtils.isNullOrEmpty(entityPath)) {
            // A connection string may be scoped to a single entity; remember it so build-time validation
            // can compare it against queueName/topicName.
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .log("Setting entity from connection string.");
            this.connectionStringEntityName = entityPath;
        }
        return credential(properties.getEndpoint().getHost(), tokenCredential);
    }

    /**
     * Enable cross entity transaction on the connection to Service bus. Use this feature only when your
     * transaction scope spans across different Service Bus entities. This feature is achieved by routing all
     * the messages through one 'send-via' entity on the server side, as explained next.
     * <p>
     * Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
     * entity through which all subsequent sends will be routed ('send-via' entity). This enables the service to
     * perform a transaction that is meant to span multiple entities. Subsequent entities that perform
     * their first operation need to either be senders, or if they are receivers they need to be on the same
     * entity as the initial entity through which all sends are routed (otherwise the service would not be able
     * to ensure that the transaction is committed, because it cannot route a receive operation through a
     * different entity). For instance, with SenderA (entity A) and ReceiverB (entity B) created from a client
     * with cross-entity transactions enabled, you would need to receive first with ReceiverB for this to work;
     * sending to entity A first and then receiving from entity B throws an exception.
     *
     * <p><strong>Avoid using non-transaction API on this client</strong></p>
     * This feature sets up the connection optimised for it: the first receiver or sender used initializes the
     * 'send-via' queue as a single message transfer entity and all messages flow via this queue, so this client
     * is not suitable for any non-transaction API.
     *
     * <p><strong>When not to enable this feature</strong></p>
     * If your transaction involves only one Service Bus entity — for example, you receive from one
     * queue/subscription and settle your own messages as part of one transaction.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
 *
     * @see the Azure Service Bus transactions documentation for details.
     */
    public ServiceBusClientBuilder enableCrossEntityTransactions() {
        this.crossEntityTransactions = true;
        return this;
    }

    // Builds a shared-key credential from the connection string: either from a pre-built SAS token or from
    // the key name/key pair.
    private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
        TokenCredential tokenCredential;
        if (properties.getSharedAccessSignature() == null) {
            tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
                properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
        } else {
            tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
        }
        return tokenCredential;
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     * If not specified, the default configuration store is used to configure Service Bus clients.
     *
     * @param configuration The configuration store used to configure Service Bus clients.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the credential by using a {@link TokenCredential} for the Service Bus resource. The azure-identity
     * library has multiple {@link TokenCredential} implementations that can be used to authenticate access to
     * the Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential The token credential to use for authentication. Access controls may be specified by
     * the ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return this;
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service.
     *
     * @param credential The token credential to use for authentication. Access controls may be specified by
     * the ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder credential(TokenCredential credential) {
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Sets the credential with the shared access policies for the Service Bus resource.
     * You can find the shared access policies on the azure portal or Azure CLI.
     * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and
     * 'Secondary Key'. The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal
     * and the 'key' attribute can be either 'Primary Key' or 'Secondary Key'.
     * This overload also sets the namespace; the credential object allows you to update the name and key.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
 */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        // The named key is adapted into the internal shared-key credential type.
        this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
        return this;
    }

    /**
     * Sets the credential with the shared access policies for the Service Bus resource.
     * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key'
     * attribute can be either 'Primary Key' or 'Secondary Key'. The credential object allows you to update the
     * name and key.
     *
     * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
        return this;
    }

    /**
     * Sets the credential with a Shared Access Signature for the Service Bus resource.
     * See the Service Bus documentation on access control with Shared Access Signatures.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential {@link AzureSasCredential} to be used for authentication.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
        return this;
    }

    /**
     * Sets the credential with a Shared Access Signature for the Service Bus resource.
     *
     * @param credential {@link AzureSasCredential} to be used for authentication.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder credential(AzureSasCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
        return this;
    }

    /**
     * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. A configured proxy requires
     * the web-sockets transport (see the transport check in getConnectionOptions()).
     *
     * @param proxyOptions The proxy configuration to use.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Package-private method that sets the verify mode for this connection.
     *
     * @param verifyMode The verification mode.
     * @return The updated {@link ServiceBusClientBuilder} object.
 */
    ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
        this.verifyMode = verifyMode;
        return this;
    }

    /**
     * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
     *
     * @param retryOptions The retry options to use.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the scheduler to use. Package-private.
     *
     * @param scheduler Scheduler to be used.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    ServiceBusClientBuilder scheduler(Scheduler scheduler) {
        this.scheduler = scheduler;
        return this;
    }

    /**
     * Sets the transport type by which all the communication with Azure Service Bus occurs.
     * Default value is {@link AmqpTransportType#AMQP AMQP}.
     *
     * @param transportType The transport type to use.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
        this.transport = transportType;
        return this;
    }

    /**
     * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
     *
     * @return A new instance of {@link ServiceBusSenderClientBuilder}.
     */
    public ServiceBusSenderClientBuilder sender() {
        return new ServiceBusSenderClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
     *
     * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
     */
    public ServiceBusReceiverClientBuilder receiver() {
        return new ServiceBusReceiverClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b>
     * Service Bus message receivers.
     *
     * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
     */
    public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
        return new ServiceBusSessionReceiverClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure a
     * {@link ServiceBusProcessorClient} instance.
     *
     * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
     */
    public ServiceBusProcessorClientBuilder processor() {
        return new ServiceBusProcessorClientBuilder();
    }

    /**
     * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus
     * processor instance that processes sessions.
     *
     * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
     */
    public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
        return new ServiceBusSessionProcessorClientBuilder();
    }

    /**
     * Called when a child client is closed. Disposes of the shared connection if there are no more clients.
     */
    void onClientClose() {
        synchronized (connectionLock) {
            final int numberOfOpenClients = openClients.decrementAndGet();
            LOGGER.atInfo()
                .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                .log("Closing a dependent client.");

            if (numberOfOpenClients > 0) {
                return;
            }

            if (numberOfOpenClients < 0) {
                // More closes than opens indicates a bookkeeping bug; log but continue tearing down.
                LOGGER.atWarning()
                    .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                    .log("There should not be less than 0 clients.");
            }

            LOGGER.info("No more open clients, closing shared connection.");
            if (sharedConnection != null) {
                sharedConnection.dispose();
                sharedConnection = null;
            } else {
                LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
            }
        }
    }

    // Lazily creates the shared connection processor; every client built from this builder shares it.
    private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
        if (retryOptions == null) {
            retryOptions = DEFAULT_RETRY;
        }

        if (scheduler == null) {
            // NOTE(review): Schedulers.elastic() is deprecated in recent Reactor versions in favor of
            // boundedElastic(); left unchanged here to preserve behavior.
            scheduler = Schedulers.elastic();
        }

        synchronized (connectionLock) {
            if (sharedConnection == null) {
                final ConnectionOptions connectionOptions = getConnectionOptions();

                // repeat() re-creates the connection whenever the previous one terminates.
                final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() ->
{ final String connectionId = StringUtil.getRandomString("MF"); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()); return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer, crossEntityTransactions); }).repeat(); sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); } } final int numberOfOpenClients = openClients.incrementAndGet(); LOGGER.info(" return sharedConnection; } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "or credentials(String, String, TokenCredential)" )); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP.")); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; final SslDomain.VerifyMode verificationMode = verifyMode != null ? 
verifyMode : SslDomain.VerifyMode.VERIFY_PEER_NAME;
        final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
        final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
        final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
        final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);

        // A custom endpoint (if configured) only changes the host/port the connection is opened against.
        if (customEndpointAddress == null) {
            return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
                ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions,
                scheduler, options, verificationMode, product, clientVersion);
        } else {
            return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
                ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions,
                scheduler, options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
                customEndpointAddress.getPort());
        }
    }

    // Resolves proxy settings from the configuration store when none were set explicitly on the builder.
    private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
        ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
        if (proxyOptions != null) {
            authentication = proxyOptions.getAuthentication();
        }

        String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
        if (CoreUtils.isNullOrEmpty(proxyAddress)) {
            return ProxyOptions.SYSTEM_DEFAULTS;
        }

        return getProxyOptions(authentication, proxyAddress, configuration,
            Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
    }

    // Builds ProxyOptions either from an explicit "host:port" address or, when java.net.useSystemProxies is
    // set, from the core HTTP proxy configuration; otherwise falls back to system defaults.
    private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
        Configuration configuration, boolean useSystemProxies) {
        String host;
        int port;
        if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
            final String[] hostPort = proxyAddress.split(":");
            host = hostPort[0];
            port = Integer.parseInt(hostPort[1]);
            final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
            final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
            final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
            return new ProxyOptions(authentication, proxy, username, password);
        } else if (useSystemProxies) {
            // Translate the azure-core HTTP proxy settings into AMQP proxy options.
            com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
                .fromConfiguration(configuration);
            return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
                coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
        } else {
            LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
                + "set or was false.");
            return ProxyOptions.SYSTEM_DEFAULTS;
        }
    }

    private static boolean isNullOrEmpty(String item) {
        return item == null || item.isEmpty();
    }

    // Validates that exactly one of queueName/topicName is in effect (and that it agrees with the connection
    // string's EntityPath, if any) and maps it to the corresponding messaging entity type.
    private static MessagingEntityType validateEntityPaths(String connectionStringEntityName, String topicName,
        String queueName) {
        final boolean hasTopicName = !isNullOrEmpty(topicName);
        final boolean hasQueueName = !isNullOrEmpty(queueName);
        final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);

        final MessagingEntityType entityType;
        if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
                "Cannot build client without setting either a queueName or topicName."));
        } else if (hasQueueName && hasTopicName) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
        } else if (hasQueueName) {
            if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "queueName (%s) is different than the connectionString's EntityPath (%s).", queueName,
                    connectionStringEntityName)));
            }
            entityType = MessagingEntityType.QUEUE;
        } else if
(hasTopicName) {
            if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) is different than the connectionString's EntityPath (%s).", topicName,
                    connectionStringEntityName)));
            }
            entityType = MessagingEntityType.SUBSCRIPTION;
        } else {
            // Only the connection string's EntityPath is set; the entity type cannot be inferred from it.
            entityType = MessagingEntityType.UNKNOWN;
        }

        return entityType;
    }

    // Resolves the AMQP entity path for the configured entity, appending the dead-letter or transfer
    // dead-letter suffix when a sub-queue is requested.
    private static String getEntityPath(MessagingEntityType entityType, String queueName, String topicName,
        String subscriptionName, SubQueue subQueue) {
        String entityPath;
        switch (entityType) {
            case QUEUE:
                entityPath = queueName;
                break;
            case SUBSCRIPTION:
                if (isNullOrEmpty(subscriptionName)) {
                    throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                        "topicName (%s) must have a subscriptionName associated with it.", topicName)));
                }
                entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                    subscriptionName);
                break;
            default:
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }

        if (subQueue == null) {
            return entityPath;
        }

        switch (subQueue) {
            case NONE:
                break;
            case TRANSFER_DEAD_LETTER_QUEUE:
                entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
                break;
            case DEAD_LETTER_QUEUE:
                entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX;
                break;
            default:
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unsupported value of subqueue type: " + subQueue));
        }

        return entityPath;
    }

    /**
     * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish
     * messages to Service Bus.
     *
     * @see ServiceBusSenderAsyncClient
     * @see ServiceBusSenderClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
    public final class ServiceBusSenderClientBuilder {
        private String queueName;
        private String topicName;

        private ServiceBusSenderClientBuilder() {
        }

        /**
         * Sets the name of the Service Bus queue to publish messages to.
         *
         * @param queueName Name of the queue.
         * @return The modified {@link ServiceBusSenderClientBuilder} object.
         */
        public ServiceBusSenderClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }

        /**
         * Sets the name of the Service Bus topic to publish messages to.
         *
         * @param topicName Name of the topic.
         * @return The modified {@link ServiceBusSenderClientBuilder} object.
         */
        public ServiceBusSenderClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }

        /**
         * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting
         * {@link ServiceBusMessage} to a Service Bus queue or topic.
         *
         * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
         * @throws IllegalStateException if neither the queue name nor topic name is set, or both are set.
         * @throws IllegalArgumentException if the entity type is not a queue or a topic.
         */
        public ServiceBusSenderAsyncClient buildAsyncClient() {
            final ServiceBusConnectionProcessor connectionProcessor =
                getOrCreateConnectionProcessor(messageSerializer);
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);

            final String entityName;
            switch (entityType) {
                case QUEUE:
                    entityName = queueName;
                    break;
                case SUBSCRIPTION:
                    entityName = topicName;
                    break;
                case UNKNOWN:
                    // Fall back to the entity named in the connection string.
                    entityName = connectionStringEntityName;
                    break;
                default:
                    throw LOGGER.logExceptionAsError(
                        new IllegalArgumentException("Unknown entity type: " + entityType));
            }

            return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
        }

        /**
         * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting
         * {@link ServiceBusMessage} to a Service Bus queue or topic.
         *
         * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
         * @throws IllegalStateException if neither the queue name nor topic name is set, or both are set.
         * @throws IllegalArgumentException if the entity type is not a queue or a topic.
         */
        public ServiceBusSenderClient buildClient() {
            return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
        }
    }

    /**
     * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service
     * Bus entity. {@link ServiceBusProcessorClient} processes messages and errors via the configured callbacks,
     * and when it completes processing one session it rolls over to the next session to process.
     *
     * <p>
     * By default, the processor:
     * <ul>
     * <li>Automatically settles messages (can be disabled via disableAutoComplete()).</li>
     * <li>Processes 1 session concurrently.
 Configured via {@link ServiceBusSessionProcessorClientBuilder#maxConcurrentSessions(int)}.</li>
     * <li>Invokes 1 concurrent call per session. Configured via
     * {@link ServiceBusSessionProcessorClientBuilder#maxConcurrentCalls(int)}.</li>
     * </ul>
     *
     * <p><strong>Instantiate a session-enabled processor client</strong></p>
     * Configure the builder with the queue (or topic/subscription) name, the maximum number of concurrent
     * sessions, a {@code processMessage} callback and a {@code processError} callback, call
     * {@code buildProcessorClient()}, then {@code start()} the returned processor.
     * (NOTE(review): the embedded code sample was corrupted during extraction and has been summarized here.)
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusSessionProcessorClientBuilder {
        private final ServiceBusProcessorClientOptions processorClientOptions;
        // All entity/receive configuration is delegated to this nested session-receiver builder.
        private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;

        private ServiceBusSessionProcessorClientBuilder() {
            sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
            // Defaults: 1 concurrent call and 1 concurrent session.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
            sessionReceiverClientBuilder.maxConcurrentSessions(1);
        }

        /**
         * Sets the amount of time to continue auto-renewing the session lock. {@link Duration#ZERO} or
         * {@code null} disables auto-renewal. For RECEIVE_AND_DELETE mode, auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock;
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
            }
            sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
            return this;
        }

        /**
         * Sets the prefetch count of the processor. Prefetch speeds up the message flow by aiming to have a
         * message readily available for local retrieval when and before the application starts the processor.
         * Setting a non-zero value will prefetch that number of messages; zero turns prefetch off. A non-zero
         * prefetch risks losing messages even though it has better performance.
         *
         * @param prefetchCount The prefetch count.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
            sessionReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
            sessionReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            sessionReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions
         * provide a secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
            this.sessionReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to. <b>The topic name must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
            sessionReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic. <b>The subscription name must also be set.</b>
         *
         * @param topicName Name of the topic.
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
            sessionReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor that will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object
         */
        public ServiceBusSessionProcessorClientBuilder processError(
            Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Max concurrent messages that this processor should process.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed; if an error happens while the message is processed, it is abandoned.
* * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder disableAutoComplete() { sessionReceiverClientBuilder.disableAutoComplete(); processorClientOptions.setDisableAutoComplete(true); return this; } /** * Creates a <b>session-aware</b> Service Bus processor responsible for reading * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription. * * @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws NullPointerException if the {@link * callbacks are not set. */ public ServiceBusProcessorClient buildProcessorClient() { return new ServiceBusProcessorClient(sessionReceiverClientBuilder, sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName, sessionReceiverClientBuilder.subscriptionName, Objects.requireNonNull(processMessage, "'processMessage' cannot be null"), Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions); } } /** * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume * messages from a <b>session aware</b> Service Bus entity. 
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
    private boolean enableAutoComplete = true;
    // null means "no roll-over"; set to a positive value to process multiple sessions concurrently.
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
    private SubQueue subQueue = SubQueue.NONE;

    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default a successfully processed
     * message is completed; a message whose processing throws is abandoned.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock. {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For RECEIVE_AND_DELETE mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
     * Package-private: exposed to callers through the session processor builder.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver. Non-zero values prefetch that number of messages; zero turns
     * prefetch off.
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to, e.g. the dead-letter sub-queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to. Also requires {@code topicName} to be set.
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. Also requires {@code subscriptionName} to be set.
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver for use by the processor.
     * Unlike {@link #buildAsyncClient()}, this honours {@code subQueue} and attaches a
     * {@link ServiceBusSessionManager}.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set, or the
     *     connection-string entity name does not match.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);

        if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            // NOTE(review): message wording "in for" looks like a typo — confirm before changing the string.
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }

        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            // Locks are irrelevant in RECEIVE_AND_DELETE mode; disable renewal outright.
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);

        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
            connectionProcessor, tracerProvider, messageSerializer, receiverOptions);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor,
            ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
     *     subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set, or the
     *     connection-string entity name does not match.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverClient} that receives messages from a queue or
     *     subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set, or the
     *     connection-string entity name does not match.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusSessionReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusSessionReceiverClient(buildAsyncClient(false), isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path; isAutoCompleteAllowed is false when building for the synchronous client,
    // where auto-complete is only supported through callback receive.
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }

        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);

        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose);
    }
}

/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
 * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provide a push-based mechanism that notifies
 * the message processing callback when a message is received, or the error handler when an error is observed.
 * Both callbacks must be configured before building; the processor supports auto-completion and auto-lock
 * renewal.
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusProcessorClientBuilder {
    // Underlying (non-session) receiver builder; most setters simply delegate to it.
    private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
    // Options shared with the processor (concurrency, auto-complete, tracing).
    private final ServiceBusProcessorClientOptions processorClientOptions;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;

    private ServiceBusProcessorClientBuilder() {
        serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
    }

    /**
     * Sets the prefetch count of the processor. Non-zero values prefetch that number of messages; zero turns
     * prefetch off.
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
        serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder queueName(String queueName) {
        serviceBusReceiverClientBuilder.queueName(queueName);
        return this;
    }

    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        serviceBusReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to, e.g. the dead-letter sub-queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
        serviceBusReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to. Also requires {@code topicName} to be set.
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
        serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic. Also requires {@code subscriptionName} to be set.
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder topicName(String topicName) {
        serviceBusReceiverClientBuilder.topicName(topicName);
        return this;
    }

    /**
     * The message processing callback for the processor which will be executed when a message is received.
     *
     * @param processMessage The message processing consumer that will be executed when a message is received.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processMessage(
        Consumer<ServiceBusReceivedMessageContext> processMessage) {
        this.processMessage = processMessage;
        return this;
    }

    /**
     * The error handler for the processor which will be invoked in the event of an error while receiving
     * messages.
     *
     * @param processError The error handler which will be executed when an error occurs.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
        this.processError = processError;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. {@link Duration#ZERO} or {@code null}
     * disables auto-renewal. For RECEIVE_AND_DELETE mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }

    /**
     * Max concurrent messages that this processor should process. By default, this is set to 1.
     *
     * @param maxConcurrentCalls max concurrent messages that this processor should process.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
     */
    public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
        if (maxConcurrentCalls < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
        }
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default a successfully processed
     * message is completed; a message whose processing throws is abandoned.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder disableAutoComplete() {
        serviceBusReceiverClientBuilder.disableAutoComplete();
        processorClientOptions.setDisableAutoComplete(true);
        return this;
    }

    /**
     * Creates a Service Bus message processor responsible for reading
     * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set, or the
     *     connection-string entity name does not match.
     * @throws IllegalArgumentException if queue or topic name are not set.
     * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
     *     set.
     */
    public ServiceBusProcessorClient buildProcessorClient() {
        return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
            serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
            serviceBusReceiverClientBuilder.subscriptionName,
            Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
            Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
    }
}

/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from Service Bus.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
    private boolean enableAutoComplete = true;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private SubQueue subQueue;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    private ServiceBusReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default a successfully processed
     * message is completed; a message whose processing throws is abandoned.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. {@link Duration#ZERO} or {@code null}
     * disables auto-renewal. For RECEIVE_AND_DELETE mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * @return The updated {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver. Non-zero values prefetch that number of messages; zero turns
     * prefetch off.
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to.
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to. Also requires {@code topicName} to be set.
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. Also requires {@code subscriptionName} to be set.
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or
     *     subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set, or the
     *     connection-string entity name does not match.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set, or the
     *     connection-string entity name does not match.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusReceiverClient(buildAsyncClient(false), isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path; isAutoCompleteAllowed is false when building for the synchronous client,
    // where auto-complete is only supported through callback receive.
    ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            // NOTE(review): message wording "in for" looks like a typo — confirm before changing the string.
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }

        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            // Locks are irrelevant in RECEIVE_AND_DELETE mode; disable renewal outright.
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor,
            ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose);
    }
}

// Validates a prefetch count set on any of the receiver builders; negative values are rejected.
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount < 0) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
            "prefetchCount (%s) cannot be less than 0.", prefetchCount)));
    }
}

// Validates a max lock renewal duration; null is allowed (disables renewal), negative is rejected.
private void validateAndThrow(Duration maxLockRenewalDuration) {
    if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
}
Please run the playback tests locally (without setting the test mode to "RECORD"). This line — and several others like it — is wrong: in the CI test environment these environment variables are not defined, so the lookups must be given default values.
protected void beforeTest() { try { ConfidentialLedgerIdentityClientBuilder confidentialLedgerIdentityClientbuilder = new ConfidentialLedgerIdentityClientBuilder() .identityServiceUri( Configuration.getGlobalConfiguration().get("IDENTITYSERVICEURI", "identityserviceuri")) .httpClient(HttpClient.createDefault()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)); if (getTestMode() == TestMode.PLAYBACK) { confidentialLedgerIdentityClientbuilder .httpClient(interceptorManager.getPlaybackClient()) .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else if (getTestMode() == TestMode.RECORD) { confidentialLedgerIdentityClientbuilder .addPolicy(interceptorManager.getRecordPolicy()) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { confidentialLedgerIdentityClientbuilder.credential(new DefaultAzureCredentialBuilder().build()); } ConfidentialLedgerIdentityClient confidentialLedgerIdentityClient = confidentialLedgerIdentityClientbuilder .buildClient(); String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERURI") .replaceAll("\\w+: .replaceAll("\\..*", ""); Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient .getLedgerIdentityWithResponse(ledgerId, null); BinaryData identityResponse = ledgerIdentityWithResponse.getValue(); ObjectMapper mapper = new ObjectMapper(); JsonNode jsonNode = mapper.readTree(identityResponse.toBytes()); String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText(); SslContext sslContext = SslContextBuilder.forClient() .trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build(); reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create() .secure(sslContextSpec -> sslContextSpec.sslContext(sslContext)); HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build(); 
System.out.println("Creating Confidential Ledger client with the certificate..."); ConfidentialLedgerClientBuilder confidentialLedgerClientbuilder = new ConfidentialLedgerClientBuilder() .ledgerUri(Configuration.getGlobalConfiguration().get("LEDGERURI", "ledgeruri")) .httpClient(httpClient) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)); if (getTestMode() == TestMode.PLAYBACK) { confidentialLedgerClientbuilder .httpClient(interceptorManager.getPlaybackClient()) .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else if (getTestMode() == TestMode.RECORD) { confidentialLedgerClientbuilder .addPolicy(interceptorManager.getRecordPolicy()) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { confidentialLedgerClientbuilder.credential(new AzureCliCredentialBuilder().build()); } confidentialLedgerClient = confidentialLedgerClientbuilder.buildClient(); } catch (Exception ex) { System.out.println("Error thrown from ConfidentialLedgerClientTestBase:" + ex); } }
.replaceAll("\\..*", "");
protected void beforeTest() { try { ConfidentialLedgerIdentityClientBuilder confidentialLedgerIdentityClientbuilder = new ConfidentialLedgerIdentityClientBuilder() .identityServiceUri( Configuration.getGlobalConfiguration().get("IDENTITYSERVICEURI", "https: .httpClient(HttpClient.createDefault()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)); if (getTestMode() == TestMode.PLAYBACK) { confidentialLedgerIdentityClientbuilder .httpClient(interceptorManager.getPlaybackClient()) .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else if (getTestMode() == TestMode.RECORD) { confidentialLedgerIdentityClientbuilder .addPolicy(interceptorManager.getRecordPolicy()) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { confidentialLedgerIdentityClientbuilder.credential(new DefaultAzureCredentialBuilder().build()); } confidentialLedgerIdentityClient = confidentialLedgerIdentityClientbuilder .buildClient(); String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests"); confidentialLedgerClientBuilder = new ConfidentialLedgerClientBuilder() .ledgerUri(Configuration.getGlobalConfiguration().get("LEDGERURI", "https: .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)); if (getTestMode() == TestMode.PLAYBACK) { confidentialLedgerClientBuilder .httpClient(interceptorManager.getPlaybackClient()) .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else if (getTestMode() == TestMode.RECORD) { confidentialLedgerClientBuilder .addPolicy(interceptorManager.getRecordPolicy()) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { confidentialLedgerClientBuilder.credential(new AzureCliCredentialBuilder().build()); } } catch (Exception ex) { System.out.println("Error thrown from ConfidentialLedgerClientTestBase:" + ex); } }
class ConfidentialLedgerClientTestBase extends TestBase { protected ConfidentialLedgerClient confidentialLedgerClient; @Override }
class ConfidentialLedgerClientTestBase extends TestBase { protected ConfidentialLedgerClient confidentialLedgerClient; protected ConfidentialLedgerClientBuilder confidentialLedgerClientBuilder; protected ConfidentialLedgerIdentityClient confidentialLedgerIdentityClient; @Override }
nit, it helps to move them to a function in the base class, so the code can be shared by all tests.
public void testGetUserTests() throws Exception { String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests"); Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient .getLedgerIdentityWithResponse(ledgerId, null); BinaryData identityResponse = ledgerIdentityWithResponse.getValue(); ObjectMapper mapper = new ObjectMapper(); JsonNode jsonNode = mapper.readTree(identityResponse.toBytes()); String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText(); SslContext sslContext = SslContextBuilder.forClient() .trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build(); reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create() .secure(sslContextSpec -> sslContextSpec.sslContext(sslContext)); HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build(); if (getTestMode() == TestMode.PLAYBACK) { confidentialLedgerClientBuilder .httpClient(interceptorManager.getPlaybackClient()) .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else if (getTestMode() == TestMode.RECORD) { confidentialLedgerClientBuilder .addPolicy(interceptorManager.getRecordPolicy()) .httpClient(httpClient) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { confidentialLedgerClientBuilder .credential(new DefaultAzureCredentialBuilder().build()) .httpClient(httpClient); } ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient(); String userAad = Configuration.getGlobalConfiguration().get("USERAAD", "ec667af1-0642-45f0-be8a-b76758a35dde"); RequestOptions requestOptions = new RequestOptions(); Response<BinaryData> response = confidentialLedgerClient.getUserWithResponse(userAad, requestOptions); BinaryData parsedResponse = response.getValue(); Assertions.assertEquals(200, 
response.getStatusCode()); ObjectMapper objectMapper = new ObjectMapper(); JsonNode responseBodyJson = null; try { responseBodyJson = objectMapper.readTree(parsedResponse.toBytes()); } catch (IOException e) { e.printStackTrace(); Assertions.assertTrue(false); } Assertions.assertEquals(responseBodyJson.get("assignedRole").asText(), "Administrator"); Assertions.assertEquals(responseBodyJson.get("userId").asText(), userAad); }
ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient();
public void testGetUserTests() throws Exception { String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests"); Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient .getLedgerIdentityWithResponse(ledgerId, null); BinaryData identityResponse = ledgerIdentityWithResponse.getValue(); ObjectMapper mapper = new ObjectMapper(); JsonNode jsonNode = mapper.readTree(identityResponse.toBytes()); String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText(); SslContext sslContext = SslContextBuilder.forClient() .trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build(); reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create() .secure(sslContextSpec -> sslContextSpec.sslContext(sslContext)); HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build(); if (getTestMode() == TestMode.PLAYBACK) { confidentialLedgerClientBuilder .httpClient(interceptorManager.getPlaybackClient()) .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else if (getTestMode() == TestMode.RECORD) { confidentialLedgerClientBuilder .addPolicy(interceptorManager.getRecordPolicy()) .httpClient(httpClient) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { confidentialLedgerClientBuilder .credential(new DefaultAzureCredentialBuilder().build()) .httpClient(httpClient); } ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient(); String userAad = Configuration.getGlobalConfiguration().get("USERAAD", "ec667af1-0642-45f0-be8a-b76758a35dde"); RequestOptions requestOptions = new RequestOptions(); Response<BinaryData> response = confidentialLedgerClient.getUserWithResponse(userAad, requestOptions); BinaryData parsedResponse = response.getValue(); Assertions.assertEquals(200, 
response.getStatusCode()); ObjectMapper objectMapper = new ObjectMapper(); JsonNode responseBodyJson = null; try { responseBodyJson = objectMapper.readTree(parsedResponse.toBytes()); } catch (IOException e) { e.printStackTrace(); Assertions.assertTrue(false); } Assertions.assertEquals(responseBodyJson.get("assignedRole").asText(), "Administrator"); Assertions.assertEquals(responseBodyJson.get("userId").asText(), userAad); }
class UserTests extends ConfidentialLedgerClientTestBase { @Test }
class UserTests extends ConfidentialLedgerClientTestBase { @Test }
I am curious why we have an assertion here if this isn't a test.
public static void main(String[] args) { ConfidentialLedgerClient confidentialLedgerClient = new ConfidentialLedgerClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .ledgerUri("https: .buildClient(); RequestOptions requestOptions = new RequestOptions(); String aadObjectId = "<YOUR AAD ID>"; Response<Void> response = confidentialLedgerClient.deleteUserWithResponse(aadObjectId, requestOptions); Assertions.assertEquals(response.getStatusCode(), 204); }
Assertions.assertEquals(response.getStatusCode(), 204);
public static void main(String[] args) { ConfidentialLedgerClient confidentialLedgerClient = new ConfidentialLedgerClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .ledgerUri("https: .buildClient(); RequestOptions requestOptions = new RequestOptions(); String aadObjectId = "<YOUR AAD ID>"; Response<Void> response = confidentialLedgerClient.deleteUserWithResponse(aadObjectId, requestOptions); }
class DeleteUser { }
class DeleteUser { }
I noticed a lot of these samples use the Response overload, is this what we expect customers to use generally? It would be helpful to show what to do with this BinaryData (in all these samples)? I find customers often copying and pasting sample code as-is.
public static void main(String[] args) { ConfidentialLedgerClient confidentialLedgerClient = new ConfidentialLedgerClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .ledgerUri("https: .buildClient(); RequestOptions requestOptions = new RequestOptions(); Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions); }
Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions);
public static void main(String[] args) { ConfidentialLedgerClient confidentialLedgerClient = new ConfidentialLedgerClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .ledgerUri("https: .buildClient(); RequestOptions requestOptions = new RequestOptions(); Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions); }
class GetCollectionIds { }
class GetCollectionIds { }
how about use a const for : Duration.ofSeconds(30) ?
private OAuthBearerToken getOAuthBearerToken() { if (accessToken == null || accessToken.isExpired()) { TokenRequestContext request = new TokenRequestContext(); request.addScopes(tokenAudience); request.setTenantId(properties.getProfile().getTenantId()); AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30)); if (accessToken != null) { this.accessToken = new AzureOAuthBearerToken(accessToken); } } return accessToken; }
AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30));
private OAuthBearerToken getOAuthBearerToken() { if (accessToken == null || accessToken.isExpired()) { TokenRequestContext request = new TokenRequestContext(); request.addScopes(tokenAudience); request.setTenantId(properties.getProfile().getTenantId()); AccessToken accessToken = credential.getToken(request).block(ACCESS_TOKEN_REQUEST_BLOCK_TIME); if (accessToken != null) { this.accessToken = new AzureOAuthBearerToken(accessToken); } } return accessToken; }
class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler { private final AzureThirdPartyServiceProperties properties = new AzureThirdPartyServiceProperties(); private final DefaultAzureCredentialBuilderFactory defaultAzureCredentialBuilderFactory = new DefaultAzureCredentialBuilderFactory(properties); private TokenCredential credential; private AzureOAuthBearerToken accessToken; private String tokenAudience; private final AzureTokenCredentialResolver tokenCredentialResolver = new AzureTokenCredentialResolver(); @Override public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) { String bootstrapServer = Arrays.asList(configs.get(BOOTSTRAP_SERVERS_CONFIG)).get(0).toString(); bootstrapServer = bootstrapServer.replaceAll("\\[|\\]", ""); URI uri = URI.create("https: this.tokenAudience = uri.getScheme() + ": credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL); AzureIdentityCustomConfigUtils.convertConfigMapToAzureProperties(configs, properties); } @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { for (Callback callback : callbacks) { if (callback instanceof OAuthBearerTokenCallback) { OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback; credential = getTokenCredential(); OAuthBearerToken token = getOAuthBearerToken(); oauthCallback.token(token); } else { throw new UnsupportedCallbackException(callback); } } } private TokenCredential getTokenCredential() { if (credential == null) { credential = tokenCredentialResolver.resolve(properties); if (credential == null) { credential = defaultAzureCredentialBuilderFactory.build().build(); } } return credential; } @Override public void close() { } }
class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler { private static final Duration ACCESS_TOKEN_REQUEST_BLOCK_TIME = Duration.ofSeconds(30); private final AzureKafkaProperties properties; private final AzureTokenCredentialResolver tokenCredentialResolver; private TokenCredential credential; private AzureOAuthBearerToken accessToken; private String tokenAudience; public KafkaOAuth2AuthenticateCallbackHandler() { this(new AzureKafkaProperties(), new AzureTokenCredentialResolver()); } public KafkaOAuth2AuthenticateCallbackHandler(AzureKafkaProperties properties, AzureTokenCredentialResolver tokenCredentialResolver) { this.properties = properties; this.tokenCredentialResolver = tokenCredentialResolver; } @SuppressWarnings("unchecked") @Override public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) { List<String> bootstrapServers = (List<String>) configs.get(BOOTSTRAP_SERVERS_CONFIG); if (bootstrapServers == null || bootstrapServers.size() != 1) { throw new IllegalArgumentException("Invalid bootstrap servers configured for Azure Event Hubs for Kafka! Must supply exactly 1 non-null bootstrap server configuration," + " with the format as {YOUR.EVENTHUBS.FQDN}:9093."); } String bootstrapServer = bootstrapServers.get(0); if (!bootstrapServer.endsWith(":9093")) { throw new IllegalArgumentException("Invalid bootstrap server configured for Azure Event Hubs for Kafka! 
The format should be {YOUR.EVENTHUBS.FQDN}:9093."); } URI uri = URI.create("https: this.tokenAudience = uri.getScheme() + ": credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL); AzureKafkaPropertiesUtils.convertConfigMapToAzureProperties(configs, properties); } @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { for (Callback callback : callbacks) { if (callback instanceof OAuthBearerTokenCallback) { OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback; credential = getTokenCredential(); OAuthBearerToken token = getOAuthBearerToken(); oauthCallback.token(token); } else { throw new UnsupportedCallbackException(callback); } } } private TokenCredential getTokenCredential() { if (credential == null) { credential = tokenCredentialResolver.resolve(properties); if (credential == null) { credential = new DefaultAzureCredentialBuilderFactory(properties).build().build(); } } return credential; } @Override public void close() { } }
> please use OAuth2 instead how, suggest the warnning message should be specific and actionable
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof KafkaProperties) { LOGGER.warn("Autoconfiguration for Event Hubs for Kafka on connection string/Azure Resource Manager" + " has been deprecated, please use OAuth2 instead."); KafkaProperties kafkaProperties = (KafkaProperties) bean; String connectionString = connectionStringProvider.getConnectionString(); String bootstrapServer = new EventHubsConnectionString(connectionString).getFullyQualifiedNamespace() + ":9093"; kafkaProperties.setBootstrapServers(new ArrayList<>(Collections.singletonList(bootstrapServer))); kafkaProperties.getProperties().put(SECURITY_PROTOCOL_CONFIG, SASL_SSL.name()); kafkaProperties.getProperties().put(SASL_MECHANISM, "PLAIN"); kafkaProperties.getProperties().put(SASL_JAAS_CONFIG, String.format(SASL_CONFIG_VALUE, connectionString, System.getProperty("line.separator"))); } return bean; }
+ " has been deprecated, please use OAuth2 instead.");
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof KafkaProperties) { LOGGER.warn("Autoconfiguration for Event Hubs for Kafka on connection string/Azure Resource Manager" + " has been deprecated, please migrate to AzureEventHubsKafkaOAuth2AutoConfiguration for OAuth2 authentication with Azure Identity credentials." + " To leverage the OAuth2 authentication, you can delete all your Event Hubs for Kafka credential configurations, and configure Kafka bootstrap servers" + " instead, which can be set as spring.kafka.boostrap-servers=EventHubsNamespacesFQDN:9093."); KafkaProperties kafkaProperties = (KafkaProperties) bean; String connectionString = connectionStringProvider.getConnectionString(); String bootstrapServer = new EventHubsConnectionString(connectionString).getFullyQualifiedNamespace() + ":9093"; kafkaProperties.setBootstrapServers(new ArrayList<>(Collections.singletonList(bootstrapServer))); kafkaProperties.getProperties().put(SECURITY_PROTOCOL_CONFIG, SASL_SSL.name()); kafkaProperties.getProperties().put(SASL_MECHANISM, "PLAIN"); kafkaProperties.getProperties().put(SASL_JAAS_CONFIG, String.format(SASL_CONFIG_VALUE, connectionString, System.getProperty("line.separator"))); } return bean; }
class KafkaPropertiesBeanPostProcessor implements BeanPostProcessor { private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPropertiesBeanPostProcessor.class); private static final String SASL_CONFIG_VALUE = "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"%s\";%s"; private final ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider; KafkaPropertiesBeanPostProcessor(ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider) { this.connectionStringProvider = connectionStringProvider; } @Override }
class KafkaPropertiesBeanPostProcessor implements BeanPostProcessor { private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPropertiesBeanPostProcessor.class); private static final String SASL_CONFIG_VALUE = "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"%s\";%s"; private final ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider; KafkaPropertiesBeanPostProcessor(ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider) { this.connectionStringProvider = connectionStringProvider; } @Override }