mirror of https://github.com/zed-industries/zed.git (synced 2024-12-24 17:28:40 +00:00)

Run migrations via a collab subcommand

parent 9952f08cce
commit cedc0f64d5

12 changed files with 180 additions and 113 deletions
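
The change folds database migrations into the collab binary itself: the separate sqlx-cli migrator image and its ad-hoc deploy script are removed, and the same collab image now either serves traffic or applies migrations depending on its first command-line argument (the Kubernetes deployment passes `serve`, the migration job passes `migrate`). A minimal sketch of the new dispatch, condensed from the crates/collab/src/main.rs hunk further down; the branch bodies here are placeholders, not the real implementation:

    // Condensed sketch of the new subcommand dispatch in collab's main().
    // The println! bodies stand in for the real logic shown in the diff below.
    use std::env::args;

    fn main() {
        match args().nth(1).as_deref() {
            Some("version") => println!("collab v<VERSION>"),         // report build version
            Some("migrate") => println!("apply pending migrations"),  // run DB migrations, then exit
            Some("serve") => println!("start the HTTP/RPC server"),   // previous default behavior
            _ => eprintln!("usage: collab <version | migrate | serve>"),
        }
    }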

@@ -19,5 +19,7 @@ FROM debian:bullseye-slim as runtime
 RUN apt-get update; \
     apt-get install -y --no-install-recommends libcurl4-openssl-dev ca-certificates
 WORKDIR app
-COPY --from=builder /app/collab /app
+COPY --from=builder /app/collab /app/collab
+COPY --from=builder /app/crates/collab/migrations /app/migrations
+ENV MIGRATIONS_PATH=/app/migrations
 ENTRYPOINT ["/app/collab"]

@@ -1,15 +0,0 @@
-# syntax = docker/dockerfile:1.2
-
-FROM rust:1.64-bullseye as builder
-WORKDIR app
-RUN --mount=type=cache,target=/usr/local/cargo/registry \
-    --mount=type=cache,target=./target \
-    cargo install sqlx-cli --root=/app --target-dir=/app/target --version 0.5.7
-
-FROM debian:bullseye-slim as runtime
-RUN apt-get update; \
-    apt-get install -y --no-install-recommends libssl1.1
-WORKDIR app
-COPY --from=builder /app/bin/sqlx /app
-COPY ./crates/collab/migrations /app/migrations
-ENTRYPOINT ["/app/sqlx", "migrate", "run"]

Procfile

@@ -1,2 +1,2 @@
 web: cd ../zed.dev && PORT=3000 npx vercel dev
-collab: cd crates/collab && cargo run
+collab: cd crates/collab && cargo run serve

@@ -1,2 +0,0 @@
-collab: ./target/release/collab
-release: ./target/release/sqlx migrate run

@@ -54,6 +54,8 @@ spec:
       containers:
         - name: collab
           image: "${ZED_IMAGE_ID}"
+          args:
+            - serve
           ports:
             - containerPort: 8080
               protocol: TCP

@@ -10,6 +10,8 @@ spec:
      containers:
        - name: migrator
          image: ${ZED_IMAGE_ID}
+         args:
+           - migrate
          env:
            - name: DATABASE_URL
              valueFrom:

@@ -6,8 +6,12 @@ use collections::HashMap;
 use futures::StreamExt;
 use serde::{Deserialize, Serialize};
 pub use sqlx::postgres::PgPoolOptions as DbOptions;
-use sqlx::{types::Uuid, FromRow, QueryBuilder};
-use std::{cmp, ops::Range, time::Duration};
+use sqlx::{
+    migrate::{Migrate as _, Migration, MigrationSource},
+    types::Uuid,
+    FromRow, QueryBuilder,
+};
+use std::{cmp, ops::Range, path::Path, time::Duration};
 use time::{OffsetDateTime, PrimitiveDateTime};

 #[async_trait]

@@ -173,6 +177,13 @@ pub trait Db: Send + Sync {
     fn as_fake(&self) -> Option<&FakeDb>;
 }

+#[cfg(any(test, debug_assertions))]
+pub const DEFAULT_MIGRATIONS_PATH: Option<&'static str> =
+    Some(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"));
+
+#[cfg(not(any(test, debug_assertions)))]
+pub const DEFAULT_MIGRATIONS_PATH: Option<&'static str> = None;
+
 pub struct PostgresDb {
     pool: sqlx::PgPool,
 }

@@ -187,6 +198,47 @@ impl PostgresDb {
         Ok(Self { pool })
     }

+    pub async fn migrate(
+        &self,
+        migrations_path: &Path,
+        ignore_checksum_mismatch: bool,
+    ) -> anyhow::Result<Vec<(Migration, Duration)>> {
+        let migrations = MigrationSource::resolve(migrations_path)
+            .await
+            .map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
+
+        let mut conn = self.pool.acquire().await?;
+
+        conn.ensure_migrations_table().await?;
+        let applied_migrations: HashMap<_, _> = conn
+            .list_applied_migrations()
+            .await?
+            .into_iter()
+            .map(|m| (m.version, m))
+            .collect();
+
+        let mut new_migrations = Vec::new();
+        for migration in migrations {
+            match applied_migrations.get(&migration.version) {
+                Some(applied_migration) => {
+                    if migration.checksum != applied_migration.checksum && !ignore_checksum_mismatch
+                    {
+                        Err(anyhow!(
+                            "checksum mismatch for applied migration {}",
+                            migration.description
+                        ))?;
+                    }
+                }
+                None => {
+                    let elapsed = conn.apply(&migration).await?;
+                    new_migrations.push((migration, elapsed));
+                }
+            }
+        }
+
+        Ok(new_migrations)
+    }
+
 pub fn fuzzy_like_string(string: &str) -> String {
     let mut result = String::with_capacity(string.len() * 2 + 1);
     for c in string.chars() {

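The new PostgresDb::migrate loads migration files from a directory via sqlx's MigrationSource, ensures the migrations bookkeeping table exists, skips migrations whose versions are already recorded (failing on a checksum mismatch unless ignore_checksum_mismatch is set), applies the rest, and returns each newly applied Migration with how long it took. A hedged usage sketch; it assumes the PostgresDb type from this diff, and the connection string and path below are placeholders rather than values from the commit:

    // Sketch only: relies on PostgresDb::new and PostgresDb::migrate as added above.
    use std::path::Path;

    async fn run_pending_migrations() -> anyhow::Result<()> {
        // Placeholder URL and path; the real values come from DATABASE_URL and
        // MIGRATIONS_PATH (or DEFAULT_MIGRATIONS_PATH) in the collab config.
        let db = PostgresDb::new("postgres://postgres@localhost/zed", 5).await?;
        let applied = db.migrate(Path::new("./migrations"), false).await?;
        for (migration, duration) in applied {
            println!("applied {} {} in {:?}", migration.version, migration.description, duration);
        }
        Ok(())
    }
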
@@ -1763,11 +1815,8 @@ mod test {
     use lazy_static::lazy_static;
     use parking_lot::Mutex;
     use rand::prelude::*;
-    use sqlx::{
-        migrate::{MigrateDatabase, Migrator},
-        Postgres,
-    };
-    use std::{path::Path, sync::Arc};
+    use sqlx::{migrate::MigrateDatabase, Postgres};
+    use std::sync::Arc;
     use util::post_inc;

     pub struct FakeDb {

@@ -2430,13 +2479,13 @@ mod test {
         let mut rng = StdRng::from_entropy();
         let name = format!("zed-test-{}", rng.gen::<u128>());
         let url = format!("postgres://postgres@localhost/{}", name);
-        let migrations_path = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"));
         Postgres::create_database(&url)
             .await
             .expect("failed to create test db");
         let db = PostgresDb::new(&url, 5).await.unwrap();
-        let migrator = Migrator::new(migrations_path).await.unwrap();
-        migrator.run(&db.pool).await.unwrap();
+        db.migrate(Path::new(DEFAULT_MIGRATIONS_PATH.unwrap()), false)
+            .await
+            .unwrap();
         Self {
             db: Some(Arc::new(db)),
             url,

@@ -9,12 +9,15 @@ mod db_tests;
 #[cfg(test)]
 mod integration_tests;

+use anyhow::anyhow;
 use axum::{routing::get, Router};
 use collab::{Error, Result};
 use db::{Db, PostgresDb};
 use serde::Deserialize;
 use std::{
+    env::args,
     net::{SocketAddr, TcpListener},
+    path::PathBuf,
     sync::Arc,
     time::Duration,
 };

@@ -34,22 +37,17 @@ pub struct Config {
     pub log_json: Option<bool>,
 }

+#[derive(Default, Deserialize)]
+pub struct MigrateConfig {
+    pub database_url: String,
+    pub migrations_path: Option<PathBuf>,
+}
+
 pub struct AppState {
     db: Arc<dyn Db>,
     config: Config,
 }

-impl AppState {
-    async fn new(config: Config) -> Result<Arc<Self>> {
-        let db = PostgresDb::new(&config.database_url, 5).await?;
-        let this = Self {
-            db: Arc::new(db),
-            config,
-        };
-        Ok(Arc::new(this))
-    }
-}
-
 #[tokio::main]
 async fn main() -> Result<()> {
     if let Err(error) = env::load_dotenv() {

@@ -59,24 +57,59 @@ async fn main() -> Result<()> {
         );
     }

-    let config = envy::from_env::<Config>().expect("error loading config");
-    init_tracing(&config);
-    let state = AppState::new(config).await?;
-
-    let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port))
-        .expect("failed to bind TCP listener");
-    let rpc_server = rpc::Server::new(state.clone(), None);
-
-    rpc_server.start_recording_project_activity(Duration::from_secs(5 * 60), rpc::RealExecutor);
-
-    let app = api::routes(&rpc_server, state.clone())
-        .merge(rpc::routes(rpc_server))
-        .merge(Router::new().route("/", get(handle_root)));
-
-    axum::Server::from_tcp(listener)?
-        .serve(app.into_make_service_with_connect_info::<SocketAddr>())
-        .await?;
-
+    match args().skip(1).next().as_deref() {
+        Some("version") => {
+            println!("collab v{VERSION}");
+        }
+        Some("migrate") => {
+            let config = envy::from_env::<MigrateConfig>().expect("error loading config");
+            let db = PostgresDb::new(&config.database_url, 5).await?;
+
+            let migrations_path = config
+                .migrations_path
+                .as_deref()
+                .or(db::DEFAULT_MIGRATIONS_PATH.map(|s| s.as_ref()))
+                .ok_or_else(|| anyhow!("missing MIGRATIONS_PATH environment variable"))?;
+
+            let migrations = db.migrate(&migrations_path, false).await?;
+            for (migration, duration) in migrations {
+                println!(
+                    "Ran {} {} {:?}",
+                    migration.version, migration.description, duration
+                );
+            }
+
+            return Ok(());
+        }
+        Some("serve") => {
+            let config = envy::from_env::<Config>().expect("error loading config");
+            let db = PostgresDb::new(&config.database_url, 5).await?;
+
+            init_tracing(&config);
+            let state = Arc::new(AppState {
+                db: Arc::new(db),
+                config,
+            });
+
+            let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port))
+                .expect("failed to bind TCP listener");
+
+            let rpc_server = rpc::Server::new(state.clone(), None);
+            rpc_server
+                .start_recording_project_activity(Duration::from_secs(5 * 60), rpc::RealExecutor);
+
+            let app = api::routes(&rpc_server, state.clone())
+                .merge(rpc::routes(rpc_server))
+                .merge(Router::new().route("/", get(handle_root)));
+
+            axum::Server::from_tcp(listener)?
+                .serve(app.into_make_service_with_connect_info::<SocketAddr>())
+                .await?;
+        }
+        _ => {
+            Err(anyhow!("usage: collab <version | migrate | serve>"))?;
+        }
+    }
     Ok(())
 }

@@ -7,7 +7,7 @@ echo "creating database..."
 script/sqlx database create

 echo "migrating database..."
-script/sqlx migrate run
+cargo run -p collab -- migrate

 echo "seeding database..."
 script/seed-db

@@ -1,12 +1,7 @@
 #!/bin/bash

-# Prerequisites:
-#
-# - Log in to the DigitalOcean API, either interactively, by running
-#   `doctl auth init`, or by setting the `DIGITALOCEAN_ACCESS_TOKEN`
-#   environment variable.
-
 set -eu
+source script/lib/deploy-helpers.sh

 if [[ $# < 2 ]]; then
     echo "Usage: $0 <production|staging|preview> <tag-name>"
@@ -15,28 +10,8 @@ fi
 export ZED_KUBE_NAMESPACE=$1
 COLLAB_VERSION=$2

-ENV_FILE="crates/collab/k8s/environments/${ZED_KUBE_NAMESPACE}.sh"
-if [[ ! -f $ENV_FILE ]]; then
-    echo "Invalid environment name '${ZED_KUBE_NAMESPACE}'"
-    exit 1
-fi
-export $(cat $ENV_FILE)
-
-if [[ ! $COLLAB_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-    echo "Invalid version number '$COLLAB_VERSION'"
-    exit 1
-fi
-TAG_NAMES=$(doctl registry repository list-tags collab --no-header --format Tag)
-if ! $(echo "${TAG_NAMES}" | grep -Fqx v${COLLAB_VERSION}); then
-    echo "No such image tag: 'zed/collab:v${COLLAB_VERSION}'"
-    echo "Found tags"
-    echo "${TAG_NAMES}"
-    exit 1
-fi
-export ZED_IMAGE_ID="registry.digitalocean.com/zed/collab:v${COLLAB_VERSION}"
-
-if [[ $(kubectl config current-context 2> /dev/null) != do-nyc1-zed-1 ]]; then
-    doctl kubernetes cluster kubeconfig save zed-1
-fi
+export_vars_for_environment $ZED_KUBE_NAMESPACE
+export ZED_IMAGE_ID=$(image_id_for_version $COLLAB_VERSION)
+target_zed_kube_cluster

 envsubst < crates/collab/k8s/manifest.template.yml | kubectl apply -f -

@@ -1,42 +1,20 @@
 #!/bin/bash

-# Prerequisites:
-#
-# - Log in to the DigitalOcean docker registry
-#   doctl registry login
-#
-# - Target the `zed-1` kubernetes cluster
-#   doctl kubernetes cluster kubeconfig save zed-1
-
 set -eu
+source script/lib/deploy-helpers.sh

-if [[ $# < 1 ]]; then
-    echo "Usage: $0 [production|staging|...]"
+if [[ $# < 2 ]]; then
+    echo "Usage: $0 <production|staging|preview> <tag-name>"
     exit 1
 fi

 export ZED_KUBE_NAMESPACE=$1
-ENV_FILE="crates/collab/k8s/environments/${ZED_KUBE_NAMESPACE}.sh"
-if [[ ! -f $ENV_FILE ]]; then
-    echo "Invalid environment name '${ZED_KUBE_NAMESPACE}'"
-    exit 1
-fi
-
-if [[ -n $(git status --short) ]]; then
-    echo "Cannot deploy with uncommited changes"
-    exit 1
-fi
-
-git_sha=$(git rev-parse HEAD)
-export ZED_IMAGE_ID=registry.digitalocean.com/zed/zed-migrator:${ZED_KUBE_NAMESPACE}-${git_sha}
-export ZED_MIGRATE_JOB_NAME=zed-migrate-${git_sha}
-
-docker build . \
-    --file ./Dockerfile.migrator \
-    --tag $ZED_IMAGE_ID
-docker push $ZED_IMAGE_ID
+COLLAB_VERSION=$2
+
+export_vars_for_environment $ZED_KUBE_NAMESPACE
+export ZED_IMAGE_ID=$(image_id_for_version ${COLLAB_VERSION})
+export ZED_MIGRATE_JOB_NAME=zed-migrate-${COLLAB_VERSION}
+target_zed_kube_cluster

 envsubst < crates/collab/k8s/migrate.template.yml | kubectl apply -f -

 pod=$(kubectl --namespace=${ZED_KUBE_NAMESPACE} get pods --selector=job-name=${ZED_MIGRATE_JOB_NAME} --output=jsonpath='{.items[*].metadata.name}')
 echo "pod:" $pod

script/lib/deploy-helpers.sh (new file, 43 lines)

@@ -0,0 +1,43 @@
+# Prerequisites:
+#
+# - Log in to the DigitalOcean API, either interactively, by running
+#   `doctl auth init`, or by setting the `DIGITALOCEAN_ACCESS_TOKEN`
+#   environment variable.
+
+function export_vars_for_environment {
+    local environment=$1
+    local env_file="crates/collab/k8s/environments/${environment}.sh"
+    if [[ ! -f $env_file ]]; then
+        echo "Invalid environment name '${environment}'"
+        exit 1
+    fi
+    export $(cat $env_file)
+}
+
+function image_id_for_version {
+    local version=$1
+    if [[ ! ${version} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+        echo "Invalid version number '${version}'"
+        exit 1
+    fi
+    TAG_NAMES=$(doctl registry repository list-tags collab --no-header --format Tag)
+    if ! $(echo "${TAG_NAMES}" | grep -Fqx v${version}); then
+        echo "No such image tag: 'zed/collab:v${version}'"
+        echo "Found tags"
+        echo "${TAG_NAMES}"
+        exit 1
+    fi
+
+    echo "registry.digitalocean.com/zed/collab:v${version}"
+}
+
+function version_for_image_id {
+    local image_id=$1
+    echo $image_id | cut -d: -f2
+}
+
+function target_zed_kube_cluster {
+    if [[ $(kubectl config current-context 2> /dev/null) != do-nyc1-zed-1 ]]; then
+        doctl kubernetes cluster kubeconfig save zed-1
+    fi
+}