Skip to content

Commit

Permalink
Implement multicluster console tech preview
Browse files Browse the repository at this point in the history
    Backend:
    - Update server config to accept managed cluster configuration and configure proxies, clients, and authenticators for each cluster.
    - Consume multicluster configuration from config file, env, or cli arg
    - Add new multicluster logout endpoint to prevent redirect back to login
    - Parse X-Cluster request header or cluster query parameter and proxy to appropriate cluster
    - Disable Prometheus metrics when multicluster is configured

    Frontend:
    - Receive managed cluster list through global SERVER_FLAGS var
    - Add cluster selector dropdown in side nav (when ACM plugin is detected or managed cluster list is greater than 1)
    - Move ACM link from perspective dropdown to cluster switcher as "All Clusters" option
    - API requests from frontend now include `X-Cluster` header or `cluster` query param where applicable
    - Add frontend redux state to keep track of currently selected cluster
    - User preference requests are always proxied to local cluster
    - Update logout logic to use new multicluster logout endpoint
    - Address frontend bugs associated with switching between clusters
    - Disable workload metrics tabs and main overview page when Prometheus is unavailable
  • Loading branch information
TheRealJon committed Jan 24, 2022
1 parent d51ed6d commit 742a7fb
Show file tree
Hide file tree
Showing 82 changed files with 1,620 additions and 921 deletions.
110 changes: 101 additions & 9 deletions cmd/bridge/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"runtime"
Expand All @@ -20,6 +21,7 @@ import (
"github.com/openshift/console/pkg/proxy"
"github.com/openshift/console/pkg/server"
"github.com/openshift/console/pkg/serverconfig"
"github.com/openshift/console/pkg/serverutils"
oscrypto "github.com/openshift/library-go/pkg/crypto"

"k8s.io/klog"
Expand Down Expand Up @@ -127,6 +129,7 @@ func main() {
fQuickStarts := fs.String("quick-starts", "", "Allow customization of available ConsoleQuickStart resources in console. (JSON as string)")
fAddPage := fs.String("add-page", "", "DEV ONLY. Allow add page customization. (JSON as string)")
fProjectAccessClusterRoles := fs.String("project-access-cluster-roles", "", "The list of Cluster Roles assignable for the project access page. (JSON as string)")
fManagedClusterConfigs := fs.String("managed-clusters", "", "List of managed cluster configurations. (JSON as string)")

if err := serverconfig.Parse(fs, os.Args[1:], "BRIDGE"); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
Expand Down Expand Up @@ -243,6 +246,63 @@ func main() {
QuickStarts: *fQuickStarts,
AddPage: *fAddPage,
ProjectAccessClusterRoles: *fProjectAccessClusterRoles,
K8sProxyConfigs: make(map[string]*proxy.Config),
K8sClients: make(map[string]*http.Client),
}

managedClusterConfigs := []serverconfig.ManagedClusterConfig{}
if *fManagedClusterConfigs != "" {
unvalidatedManagedClusters := []serverconfig.ManagedClusterConfig{}
if err := json.Unmarshal([]byte(*fManagedClusterConfigs), &unvalidatedManagedClusters); err != nil {
klog.Fatalf("Unable to parse managed cluster JSON: %v", *fManagedClusterConfigs)
}
for _, managedClusterConfig := range unvalidatedManagedClusters {
err := serverconfig.ValidateManagedClusterConfig(managedClusterConfig)
if err != nil {
klog.Errorf("Error configuring managed cluster. Invalid configuration: %v", err)
continue
}
managedClusterConfigs = append(managedClusterConfigs, managedClusterConfig)
}
}

if len(managedClusterConfigs) > 0 {
for _, managedCluster := range managedClusterConfigs {
klog.Infof("Configuring managed cluster %s", managedCluster.Name)
managedClusterAPIEndpointURL, err := url.Parse(managedCluster.APIServer.URL)
if err != nil {
klog.Errorf("Error parsing managed cluster URL for cluster %s", managedCluster.Name)
continue
}

managedClusterCertPEM, err := ioutil.ReadFile(managedCluster.APIServer.CAFile)
if err != nil {
klog.Errorf("Error parsing managed cluster CA file for cluster %s", managedCluster.Name)
continue
}

managedClusterRootCAs := x509.NewCertPool()
if !managedClusterRootCAs.AppendCertsFromPEM(managedClusterCertPEM) {
klog.Errorf("No CA found for the managed cluster %s", managedCluster.Name)
continue
}

managedClusterTLSConfig := oscrypto.SecureTLSConfig(&tls.Config{
RootCAs: managedClusterRootCAs,
})

srv.K8sProxyConfigs[managedCluster.Name] = &proxy.Config{
TLSClientConfig: managedClusterTLSConfig,
HeaderBlacklist: []string{"Cookie", "X-CSRFToken"},
Endpoint: managedClusterAPIEndpointURL,
}

srv.K8sClients[managedCluster.Name] = &http.Client{
Transport: &http.Transport{
TLSClientConfig: managedClusterTLSConfig,
},
}
}
}

// if !in-cluster (dev) we should not pass these values to the frontend
Expand Down Expand Up @@ -306,7 +366,7 @@ func main() {
klog.Fatalf("failed to read bearer token: %v", err)
}

srv.K8sProxyConfig = &proxy.Config{
srv.K8sProxyConfigs[serverutils.LocalClusterName] = &proxy.Config{
TLSClientConfig: tlsConfig,
HeaderBlacklist: []string{"Cookie", "X-CSRFToken"},
Endpoint: k8sEndpoint,
Expand Down Expand Up @@ -372,7 +432,7 @@ func main() {
serviceProxyTLSConfig := oscrypto.SecureTLSConfig(&tls.Config{
InsecureSkipVerify: *fK8sModeOffClusterSkipVerifyTLS,
})
srv.K8sProxyConfig = &proxy.Config{
srv.K8sProxyConfigs[serverutils.LocalClusterName] = &proxy.Config{
TLSClientConfig: serviceProxyTLSConfig,
HeaderBlacklist: []string{"Cookie", "X-CSRFToken"},
Endpoint: k8sEndpoint,
Expand Down Expand Up @@ -441,12 +501,12 @@ func main() {

apiServerEndpoint := *fK8sPublicEndpoint
if apiServerEndpoint == "" {
apiServerEndpoint = srv.K8sProxyConfig.Endpoint.String()
apiServerEndpoint = srv.K8sProxyConfigs[serverutils.LocalClusterName].Endpoint.String()
}
srv.KubeAPIServerURL = apiServerEndpoint
srv.K8sClient = &http.Client{
srv.K8sClients[serverutils.LocalClusterName] = &http.Client{
Transport: &http.Transport{
TLSClientConfig: srv.K8sProxyConfig.TLSClientConfig,
TLSClientConfig: srv.K8sProxyConfigs[serverutils.LocalClusterName].TLSClientConfig,
},
}

Expand Down Expand Up @@ -522,6 +582,7 @@ func main() {
CookiePath: cookiePath,
RefererPath: refererPath,
SecureCookies: secureCookies,
ClusterName: serverutils.LocalClusterName,
}

// NOTE: This won't work when using the OpenShift auth mode.
Expand All @@ -539,9 +600,40 @@ func main() {

}

if srv.Auther, err = auth.NewAuthenticator(context.Background(), oidcClientConfig); err != nil {
srv.Authers = make(map[string]*auth.Authenticator)
if srv.Authers[serverutils.LocalClusterName], err = auth.NewAuthenticator(context.Background(), oidcClientConfig); err != nil {
klog.Fatalf("Error initializing authenticator: %v", err)
}

if len(managedClusterConfigs) > 0 {
for _, managedCluster := range managedClusterConfigs {
managedClusterOIDCClientConfig := &auth.Config{
AuthSource: authSource,
IssuerURL: managedCluster.APIServer.URL,
IssuerCA: managedCluster.OAuth.CAFile,
ClientID: managedCluster.OAuth.ClientID,
ClientSecret: managedCluster.OAuth.ClientSecret,
RedirectURL: proxy.SingleJoiningSlash(srv.BaseURL.String(), fmt.Sprintf("%s/%s", server.AuthLoginCallbackEndpoint, managedCluster.Name)),
Scope: scopes,

// Use the k8s CA file for OpenShift OAuth metadata discovery.
// This might be different than IssuerCA.
K8sCA: managedCluster.APIServer.CAFile,

ErrorURL: authLoginErrorEndpoint,
SuccessURL: authLoginSuccessEndpoint,

CookiePath: cookiePath,
RefererPath: refererPath,
SecureCookies: secureCookies,
ClusterName: managedCluster.Name,
}

if srv.Authers[managedCluster.Name], err = auth.NewAuthenticator(context.Background(), managedClusterOIDCClientConfig); err != nil {
klog.Fatalf("Error initializing managed cluster authenticator: %v", err)
}
}
}
case "disabled":
klog.Warning("running with AUTHENTICATION DISABLED!")
default:
Expand Down Expand Up @@ -580,7 +672,7 @@ func main() {
},
&http.Client{
Transport: &http.Transport{
TLSClientConfig: srv.K8sProxyConfig.TLSClientConfig,
TLSClientConfig: srv.K8sProxyConfigs[serverutils.LocalClusterName].TLSClientConfig,
},
},
nil,
Expand All @@ -598,7 +690,7 @@ func main() {
},
&http.Client{
Transport: &http.Transport{
TLSClientConfig: srv.K8sProxyConfig.TLSClientConfig,
TLSClientConfig: srv.K8sProxyConfigs[serverutils.LocalClusterName].TLSClientConfig,
},
},
knative.EventSourceFilter,
Expand All @@ -616,7 +708,7 @@ func main() {
},
&http.Client{
Transport: &http.Transport{
TLSClientConfig: srv.K8sProxyConfig.TLSClientConfig,
TLSClientConfig: srv.K8sProxyConfigs[serverutils.LocalClusterName].TLSClientConfig,
},
},
knative.ChannelFilter,
Expand Down
96 changes: 96 additions & 0 deletions contrib/multicluster-environment.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#!/usr/bin/env bash

# Set up every cluster in your kubeconfig file as a managed cluster and
# prepare the environment for a local bridge. The hub cluster is whatever
# your current kubeconfig context points at. Typically, you'll want to start
# with a fresh kubeconfig file. For example:
#
# $ export KUBECONFIG=multicluster.config
# $ oc login cluster1.devcluster.openshift.com:6443
# $ oc login cluster2.devcluster.openshift.com:6443
# $ oc login cluster3.devcluster.openshift.com:6443
# $ source ./contrib/multicluster-environment.sh
# $ ./bin/bridge
#
# An OAuthClient is created on each cluster, so this script is meant only
# for development clusters.

HUB_CONTEXT="$(oc config current-context)"
OAUTH_CLIENT_ID="${OAUTH_CLIENT_ID:-local-console-oauth-client}"
OAUTH_CLIENT_SECRET="${OAUTH_CLIENT_SECRET:-open-sesame}"
BRIDGE_MANAGED_CLUSTERS="[]"
# Scratch directory holding the CA bundles pulled from each cluster.
CA_FILE_DIR="$(mktemp -d 2>/dev/null || mktemp -d -t 'bridge-ca-files')"

# Hub cluster CA bundles: API server CA and OAuth (ingress) CA.
oc get -n openshift-config-managed cm kube-root-ca.crt -o json | jq -r '.data["ca.crt"]' > "$CA_FILE_DIR/api-ca.crt"
oc get -n openshift-config-managed cm default-ingress-cert -o json | jq -r '.data["ca-bundle.crt"]' > "$CA_FILE_DIR/oauth-ca.crt"

for CTX in $(oc config get-contexts -o name); do
  # Ensure the console OAuthClient exists on this cluster.
  cat <<EOF | oc --context "$CTX" apply -f -
apiVersion: oauth.openshift.io/v1
kind: OAuthClient
metadata:
  name: "$OAUTH_CLIENT_ID"
grantMethod: auto
secret: "$OAUTH_CLIENT_SECRET"
redirectURIs:
  - http://localhost:9000
EOF
  # Every context other than the hub (current) context is appended to the
  # managed-cluster JSON array consumed by bridge.
  if [ "$CTX" != "$HUB_CONTEXT" ]; then
    CLUSTER_NAME="$(echo "$CTX" | cut -f2 -d"/" | cut -f1 -d":")"
    CLUSTER_URL="$(oc --context "$CTX" whoami --show-server)"
    # Per-cluster directory for CA files
    mkdir -p "$CA_FILE_DIR/$CLUSTER_NAME"
    CA_FILE="$CA_FILE_DIR/$CLUSTER_NAME/api-ca.crt"
    OAUTH_CA_FILE="$CA_FILE_DIR/$CLUSTER_NAME/oauth-ca.crt"
    oc --context "$CTX" get -n openshift-config-managed cm kube-root-ca.crt -o json | jq -r '.data["ca.crt"]' > "$CA_FILE"
    oc --context "$CTX" get -n openshift-config-managed cm default-ingress-cert -o json | jq -r '.data["ca-bundle.crt"]' > "$OAUTH_CA_FILE"
    BRIDGE_MANAGED_CLUSTERS=$(echo "$BRIDGE_MANAGED_CLUSTERS" | \
      jq --arg name "$CLUSTER_NAME" \
      --arg url "$CLUSTER_URL" \
      --arg caFile "$CA_FILE" \
      --arg clientID "$OAUTH_CLIENT_ID" \
      --arg clientSecret "$OAUTH_CLIENT_SECRET" \
      --arg oauthCAFile "$OAUTH_CA_FILE" \
      '. += [{"name": $name, "apiServer": {"url": $url, "caFile": $caFile}, "oauth": {"clientID": $clientID, "clientSecret": $clientSecret, caFile: $oauthCAFile}}]')
  fi
done

export BRIDGE_MANAGED_CLUSTERS

export BRIDGE_BASE_ADDRESS="http://localhost:9000"

# FIXME: We should be able to get rid of this, but it requires changes to
# main.go to support `ca-file` in off-cluster mode for the k8s proxy.
export BRIDGE_K8S_MODE_OFF_CLUSTER_SKIP_VERIFY_TLS=true

export BRIDGE_K8S_AUTH="openshift"

export BRIDGE_K8S_MODE="off-cluster"

export BRIDGE_K8S_MODE_OFF_CLUSTER_ENDPOINT="$(oc whoami --show-server)"

export BRIDGE_CA_FILE="$CA_FILE_DIR/api-ca.crt"

export BRIDGE_USER_AUTH="openshift"

export BRIDGE_USER_AUTH_OIDC_CLIENT_ID="$OAUTH_CLIENT_ID"

export BRIDGE_USER_AUTH_OIDC_CLIENT_SECRET="$OAUTH_CLIENT_SECRET"

export BRIDGE_USER_AUTH_OIDC_CA_FILE="$CA_FILE_DIR/oauth-ca.crt"

echo "Using hub cluster: $BRIDGE_K8S_MODE_OFF_CLUSTER_ENDPOINT"
echo "Using managed clusters:"
echo "$BRIDGE_MANAGED_CLUSTERS" | jq -r '.[].apiServer.url'
2 changes: 1 addition & 1 deletion frontend/@types/console/index.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,8 @@ declare interface Window {
addPage: string; // JSON encoded configuration
consolePlugins: string[]; // Console dynamic plugins enabled on the cluster
quickStarts: string;
clusters: string[];
projectAccessClusterRoles: string;
clusters: string[];
};
windowError?: string;
__REDUX_DEVTOOLS_EXTENSION_COMPOSE__?: Function;
Expand Down
11 changes: 8 additions & 3 deletions frontend/__tests__/components/pod.spec.tsx
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
import * as React from 'react';
import { shallow, ShallowWrapper } from 'enzyme';
import { Provider } from 'react-redux';
import { shallow, ShallowWrapper, mount, ReactWrapper } from 'enzyme';
import store from '@console/internal/redux';

import { ContainerRow, PodsDetailsPage } from '../../public/components/pod';
import { DetailsPage } from '../../public/components/factory';

describe(PodsDetailsPage.displayName, () => {
let wrapper: ShallowWrapper;
let wrapper: ReactWrapper;

beforeEach(() => {
wrapper = shallow(
wrapper = mount(
<PodsDetailsPage
match={{
url: '/k8s/ns/default/pods/example',
Expand All @@ -18,6 +20,9 @@ describe(PodsDetailsPage.displayName, () => {
}}
kind="Pod"
/>,
{
wrappingComponent: ({ children }) => <Provider store={store}>{children}</Provider>,
},
);
});

Expand Down
1 change: 0 additions & 1 deletion frontend/packages/console-app/locales/en/console-app.json
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,6 @@
"Kubelet version": "Kubelet version",
"Kube-Proxy version": "Kube-Proxy version",
"Overview": "Overview",
"Terminal": "Terminal",
"Pods": "Pods",
"Labels": "Labels",
"{{formattedCores}} cores / {{totalCores}} cores": "{{formattedCores}} cores / {{totalCores}} cores",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import * as React from 'react';
import { ClusterContext, useValuesForClusterContext } from './cluster';

type DetectClusterProps = {
children: React.ReactNode;
};

const DetectCluster: React.FC<DetectClusterProps> = ({ children }) => {
const { cluster, setCluster } = useValuesForClusterContext();
return (
<ClusterContext.Provider value={{ cluster, setCluster }}>{children}</ClusterContext.Provider>
);
};

export default DetectCluster;
Loading

0 comments on commit 742a7fb

Please sign in to comment.