Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
import io.fabric8.kubernetes.client.dsl.Reaper;
import io.fabric8.kubernetes.client.dsl.Watchable;
import io.fabric8.kubernetes.client.dsl.internal.WatchConnectionManager;
import io.fabric8.kubernetes.client.dsl.internal.WatchHTTPManager;
import io.fabric8.kubernetes.client.utils.URLUtils;

import java.io.File;
Expand Down Expand Up @@ -649,6 +650,27 @@ public Watch watch(String resourceVersion, final Watcher<T> watcher) throws Kube
return watch;
} catch (MalformedURLException e) {
throw KubernetesClientException.launderThrowable(e);
} catch (KubernetesClientException ke) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we check the type of the exception? For example, there would be no point in falling back to HTTP watch in e.g. auth error. Perhaps we should only fall back if we receive a 200 instead of a 101 for example?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done, added a check for ke.getCode() to be 200.

if (ke.getCode() != 200) {
throw ke;
}

// If the HTTP return code is 200, we retry the watch again using a persistent hanging
// HTTP GET. This is meant to handle cases like kubectl local proxy which does not support
// websockets. Issue: https://github.com/kubernetes/kubernetes/issues/25126
try {
return new WatchHTTPManager(
client,
this,
resourceVersion,
watcher,
config.getWatchReconnectInterval(),
config.getWatchReconnectLimit(),
config.getConnectionTimeout()
);
} catch (MalformedURLException e) {
throw KubernetesClientException.launderThrowable(e);
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,7 @@ public static Status createStatus(int statusCode, String message) {
return status;
}

KubernetesClientException requestFailure(Request request, Status status) {
public static KubernetesClientException requestFailure(Request request, Status status) {
StringBuilder sb = new StringBuilder();
sb.append("Failure executing: ").append(request.method())
.append(" at: ").append(request.url().toString()).append(".");
Expand All @@ -318,7 +318,7 @@ KubernetesClientException requestFailure(Request request, Status status) {
return new KubernetesClientException(sb.toString(), status.getCode(), status);
}

KubernetesClientException requestException(Request request, Exception e) {
public static KubernetesClientException requestException(Request request, Exception e) {
StringBuilder sb = new StringBuilder();
sb.append("Error executing: ").append(request.method())
.append(" at: ").append(request.url().toString())
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,285 @@
/**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.kubernetes.client.dsl.internal;

import static io.fabric8.kubernetes.client.utils.Utils.isNotNullOrEmpty;
import static java.net.HttpURLConnection.HTTP_GONE;

import com.fasterxml.jackson.databind.ObjectMapper;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.KubernetesResourceList;
import io.fabric8.kubernetes.api.model.Status;
import io.fabric8.kubernetes.api.model.WatchEvent;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.kubernetes.client.Watch;
import io.fabric8.kubernetes.client.Watcher;
import io.fabric8.kubernetes.client.dsl.base.BaseOperation;

import io.fabric8.kubernetes.client.dsl.base.OperationSupport;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import okhttp3.HttpUrl;
import okhttp3.Interceptor;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.logging.HttpLoggingInterceptor;
import okio.BufferedSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Watches a Kubernetes resource list over a persistent, hanging HTTP GET instead of a
 * websocket. Used as a fallback when the API endpoint (e.g. kubectl local proxy) does not
 * support websocket upgrades. Events arrive as newline-delimited JSON {@code WatchEvent}
 * objects on the response stream; on stream loss the watch reconnects with exponential
 * back-off, resuming from the highest resource version seen so far.
 *
 * @param <T> the watched resource type
 * @param <L> the corresponding resource-list type, used to bootstrap the resource version
 */
public class WatchHTTPManager<T extends HasMetadata, L extends KubernetesResourceList<T>> implements
  Watch {
  private static final Logger logger = LoggerFactory.getLogger(WatchHTTPManager.class);
  // Shared, thread-safe Jackson mapper for deserializing watch events.
  private static final ObjectMapper mapper = new ObjectMapper();

  // Cap the exponential back-off at 2^5 = 32x the base reconnect interval.
  private static final int MAX_INTERVAL_EXPONENT = 5;

  private final BaseOperation<T, L, ?, ?> baseOperation;
  private final Watcher<T> watcher;
  // Set once close() is requested; suppresses further reconnects and onClose callbacks.
  private final AtomicBoolean forceClosed = new AtomicBoolean();
  // Highest resource version observed so far; the watch resumes from it after reconnect.
  private final AtomicReference<String> resourceVersion;
  private final int reconnectLimit;
  private final int reconnectInterval;

  // Ensures at most one reconnect task is scheduled at a time.
  private final AtomicBoolean reconnectPending = new AtomicBoolean(false);
  private final URL requestUrl;
  private final AtomicInteger currentReconnectAttempt = new AtomicInteger(0);
  private final OkHttpClient clonedClient;

  private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
      Thread ret = new Thread(r, "Executor for Watch " + System.identityHashCode(WatchHTTPManager.this));
      ret.setDaemon(true);
      return ret;
    }
  });

  /**
   * Creates the manager and immediately opens the first watch connection.
   *
   * @param client            base OkHttp client; cloned with watch-appropriate settings
   * @param baseOperation     operation describing the resource (URL, selectors, name)
   * @param version           resource version to start from, or {@code null} to list first
   *                          and start from the list's resource version
   * @param watcher           callback receiving events and the terminal onClose
   * @param reconnectInterval base back-off interval in milliseconds
   * @param reconnectLimit    max reconnect attempts; negative means unlimited
   * @param connectTimeout    connect timeout in milliseconds for the cloned client
   * @throws MalformedURLException if the namespaced resource URL cannot be built
   */
  public WatchHTTPManager(final OkHttpClient client,
                          final BaseOperation<T, L, ?, ?> baseOperation,
                          final String version, final Watcher<T> watcher, final int reconnectInterval,
                          final int reconnectLimit, long connectTimeout)
    throws MalformedURLException {

    if (version == null) {
      // No starting point supplied: list once and watch from the list's resource version.
      L currentList = baseOperation.list();
      this.resourceVersion = new AtomicReference<>(currentList.getMetadata().getResourceVersion());
    } else {
      this.resourceVersion = new AtomicReference<>(version);
    }
    this.baseOperation = baseOperation;
    this.watcher = watcher;
    this.reconnectInterval = reconnectInterval;
    this.reconnectLimit = reconnectLimit;

    // Dedicated client: no read timeout (the GET hangs indefinitely) and no cache.
    // Named distinctly to avoid shadowing the field of the same name.
    OkHttpClient watchClient = client.newBuilder()
      .connectTimeout(connectTimeout, TimeUnit.MILLISECONDS)
      .readTimeout(0, TimeUnit.MILLISECONDS)
      .cache(null)
      .build();

    // If we set the HttpLoggingInterceptor's logging level to Body (as it is by default), it does
    // not let us stream responses from the server.
    for (Interceptor i : watchClient.networkInterceptors()) {
      if (i instanceof HttpLoggingInterceptor) {
        ((HttpLoggingInterceptor) i).setLevel(HttpLoggingInterceptor.Level.BASIC);
      }
    }

    this.clonedClient = watchClient;
    requestUrl = baseOperation.getNamespacedUrl();
    runWatch();
  }

  /**
   * Opens one watch connection and consumes its event stream until it is exhausted or
   * fails, then either reports closure or schedules a reconnect.
   */
  private void runWatch() {
    logger.debug("Watching via HTTP GET ... {}", this);

    HttpUrl.Builder httpUrlBuilder = HttpUrl.get(requestUrl).newBuilder();
    String labelQueryParam = baseOperation.getLabelQueryParam();
    if (isNotNullOrEmpty(labelQueryParam)) {
      httpUrlBuilder.addQueryParameter("labelSelector", labelQueryParam);
    }

    // Narrow a named watch down to a single object via a fieldSelector on metadata.name.
    String fieldQueryString = baseOperation.getFieldQueryParam();
    String name = baseOperation.getName();
    if (name != null && name.length() > 0) {
      if (fieldQueryString.length() > 0) {
        fieldQueryString += ",";
      }
      fieldQueryString += "metadata.name=" + name;
    }

    if (isNotNullOrEmpty(fieldQueryString)) {
      httpUrlBuilder.addQueryParameter("fieldSelector", fieldQueryString);
    }

    httpUrlBuilder
      .addQueryParameter("resourceVersion", this.resourceVersion.get())
      .addQueryParameter("watch", "true");

    final Request request = new Request.Builder()
      .get()
      .url(httpUrlBuilder.build())
      .addHeader("Origin", requestUrl.getProtocol() + "://" + requestUrl.getHost() + ":" + requestUrl.getPort())
      .build();

    Response response = null;
    try {
      response = clonedClient.newCall(request).execute();
      if (!response.isSuccessful()) {
        throw OperationSupport.requestFailure(request,
          OperationSupport.createStatus(response.code(), response.message()));
      }

      // The server streams one JSON watch event per line until it drops the connection.
      BufferedSource source = response.body().source();
      while (!source.exhausted()) {
        String message = source.readUtf8LineStrict();
        onMessage(message);
      }
    } catch (Exception e) {
      // Connection loss is expected for long-running watches; the finally block decides
      // whether to reconnect.
      logger.info("Watch connection close received. reason: {}", e.getMessage());
    } finally {
      // Always release the connection first: the previous version leaked the response
      // body on the forceClosed and reconnect-limit early-return paths.
      if (response != null) {
        response.body().close();
      }
      if (forceClosed.get()) {
        logger.warn("Ignoring onClose for already closed/closing connection");
      } else if (reconnectLimit >= 0 && currentReconnectAttempt.get() >= reconnectLimit) {
        watcher.onClose(new KubernetesClientException("Connection unexpectedly closed"));
      } else {
        // The stream is exhausted, so we have lost our "watch" and must reconnect.
        scheduleReconnect();
      }
    }
  }

  /**
   * Schedules a reconnect attempt on the single-threaded executor after the current
   * back-off delay. A pending flag guarantees only one reconnect is queued at a time.
   */
  private void scheduleReconnect() {
    logger.debug("Submitting reconnect task to the executor");
    // make sure that whichever thread calls this method, the tasks are
    // performed serially in the executor.
    executor.submit(new Runnable() {
      @Override
      public void run() {
        if (!reconnectPending.compareAndSet(false, true)) {
          logger.debug("Reconnect already scheduled");
          return;
        }
        try {
          // actual reconnect only after the back-off time has passed, without
          // blocking the thread
          logger.debug("Scheduling reconnect task");
          executor.schedule(new Runnable() {
            @Override
            public void run() {
              try {
                WatchHTTPManager.this.runWatch();
                reconnectPending.set(false);
              } catch (Exception e) {
                // An unexpected error occurred and we didn't even get an onFailure callback.
                logger.error("Exception in reconnect", e);
                close();
                watcher.onClose(new KubernetesClientException("Unhandled exception in reconnect attempt", e));
              }
            }
          }, nextReconnectInterval(), TimeUnit.MILLISECONDS);
        } catch (RejectedExecutionException e) {
          // Executor already shut down (watch closed); clear the flag and give up.
          logger.error("Exception in reconnect", e);
          reconnectPending.set(false);
        }
      }
    });
  }

  /**
   * Deserializes one line of the watch stream and dispatches it to the watcher.
   * Advances {@link #resourceVersion} monotonically; a {@code Status} with HTTP 410 (Gone)
   * means the resource version expired, which terminates the watch via onClose.
   * Malformed or unexpected messages are logged and skipped.
   *
   * @param messageSource one newline-delimited JSON watch event
   * @throws IOException declared for API compatibility; parse errors are handled internally
   */
  public void onMessage(String messageSource) throws IOException {
    try {
      WatchEvent event = mapper.readValue(messageSource, WatchEvent.class);
      if (event.getObject() instanceof HasMetadata) {
        @SuppressWarnings("unchecked")
        T obj = (T) event.getObject();
        // Dirty cast - should always be valid though
        String currentResourceVersion = resourceVersion.get();
        String newResourceVersion = ((HasMetadata) obj).getMetadata().getResourceVersion();
        // NOTE(review): lexicographic comparison of resource versions — Kubernetes treats
        // them as opaque, so this is an approximation of "newer"; confirm acceptable.
        if (currentResourceVersion.compareTo(newResourceVersion) < 0) {
          resourceVersion.compareAndSet(currentResourceVersion, newResourceVersion);
        }
        Watcher.Action action = Watcher.Action.valueOf(event.getType());
        watcher.eventReceived(action, obj);
      } else if (event.getObject() instanceof Status) {
        Status status = (Status) event.getObject();
        // The resource version no longer exists - this has to be handled by the caller.
        if (status.getCode() == HTTP_GONE) {
          // shut down executor, etc., then surface the Status to the caller
          close();
          watcher.onClose(new KubernetesClientException(status));
          return;
        }

        logger.error("Error received: {}", status.toString());
      } else {
        logger.error("Unknown message received: {}", messageSource);
      }
    } catch (IOException e) {
      logger.error("Could not deserialize watch event: {}", messageSource, e);
    } catch (ClassCastException e) {
      logger.error("Received wrong type of object for watch", e);
    } catch (IllegalArgumentException e) {
      logger.error("Invalid event type", e);
    }
  }

  /**
   * Returns the next back-off delay in milliseconds and advances the attempt counter.
   * Delay doubles per attempt, capped at {@code reconnectInterval * 2^MAX_INTERVAL_EXPONENT}.
   */
  private long nextReconnectInterval() {
    int exponentOfTwo = currentReconnectAttempt.getAndIncrement();
    if (exponentOfTwo > MAX_INTERVAL_EXPONENT) {
      exponentOfTwo = MAX_INTERVAL_EXPONENT;
    }
    long ret = reconnectInterval * (1L << exponentOfTwo);
    logger.info("Current reconnect backoff is {} milliseconds (T{})", ret, exponentOfTwo);
    return ret;
  }

  /**
   * Forcibly closes the watch: marks it closed so no further reconnects or onClose
   * callbacks fire, then shuts down the executor, escalating to shutdownNow after a
   * one-second grace period.
   */
  @Override
  public void close() {
    logger.debug("Force closing the watch {}", this);
    forceClosed.set(true);
    if (!executor.isShutdown()) {
      try {
        executor.shutdown();
        if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
          logger.warn("Executor didn't terminate in time after shutdown in close(), killing it in: {}", this);
          executor.shutdownNow();
        }
      } catch (InterruptedException e) {
        // Preserve the interrupt status for callers higher up the stack.
        Thread.currentThread().interrupt();
        throw KubernetesClientException.launderThrowable(e);
      } catch (Throwable t) {
        throw KubernetesClientException.launderThrowable(t);
      }
    }
  }
}
Loading