diff --git a/cmd/cmd.go b/cmd/cmd.go index 014471f763..8a70375514 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -2,117 +2,113 @@ package cmd import ( "context" + "fmt" "strings" - "github.com/databricks/cli/cmd/psql" - ssh "github.com/databricks/cli/experimental/ssh/cmd" - - "github.com/databricks/cli/cmd/account" - "github.com/databricks/cli/cmd/api" "github.com/databricks/cli/cmd/auth" - "github.com/databricks/cli/cmd/bundle" - "github.com/databricks/cli/cmd/cache" - "github.com/databricks/cli/cmd/completion" - "github.com/databricks/cli/cmd/configure" - "github.com/databricks/cli/cmd/experimental" - "github.com/databricks/cli/cmd/fs" - "github.com/databricks/cli/cmd/labs" - "github.com/databricks/cli/cmd/pipelines" + "github.com/databricks/cli/cmd/lakebox" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/cmd/selftest" - "github.com/databricks/cli/cmd/sync" - "github.com/databricks/cli/cmd/version" - "github.com/databricks/cli/cmd/workspace" - "github.com/databricks/cli/libs/cmdgroup" + "github.com/databricks/databricks-sdk-go" "github.com/spf13/cobra" ) -const ( - mainGroup = "main" - permissionsGroup = "permissions" -) - -// configureGroups adds groups to the command, only if a group -// has at least one available command. -func configureGroups(cmd *cobra.Command, groups []cobra.Group) { - filteredGroups := cmdgroup.FilterGroups(groups, cmd.Commands()) - for i := range filteredGroups { - cmd.AddGroup(&filteredGroups[i]) - } -} - -func accountCommand() *cobra.Command { - cmd := account.New() - configureGroups(cmd, account.Groups()) - return cmd -} - func New(ctx context.Context) *cobra.Command { cli := root.New(ctx) - - // Add account subcommand. - cli.AddCommand(accountCommand()) - - // Add workspace subcommands. - workspaceCommands := workspace.All() - for _, cmd := range workspaceCommands { - // Order the permissions subcommands after the main commands. 
- for _, sub := range cmd.Commands() { - // some commands override groups in overrides.go, leave them as-is - if sub.GroupID != "" { - continue - } - - switch { - case strings.HasSuffix(sub.Name(), "-permissions"), strings.HasSuffix(sub.Name(), "-permission-levels"): - sub.GroupID = permissionsGroup - default: - sub.GroupID = mainGroup + cli.Use = "lakebox" + cli.Short = "Lakebox CLI — manage Databricks sandbox environments" + cli.Long = `Lakebox CLI — manage Databricks sandbox environments. + +Lakebox provides SSH-accessible development environments backed by +microVM isolation. Each lakebox is a personal sandbox with pre-installed +tooling (Python, Node.js, Rust, Databricks CLI) and persistent storage. + +Getting started: + lakebox auth login --host https://... # authenticate to Databricks workspace and lakebox service + lakebox ssh # SSH to your default lakebox + +Common workflows: + lakebox ssh # SSH to your default lakebox + lakebox ssh my-project # SSH to a named lakebox + lakebox list # list your lakeboxes + lakebox create # create a new lakebox + lakebox delete my-project # delete a lakebox + lakebox status my-project # show lakebox status + +The CLI manages your ~/.ssh/config so you can also connect directly: + ssh my-project # after 'lakebox ssh' +` + cli.CompletionOptions.DisableDefaultCmd = true + + authCmd := auth.New() + // Hook into 'auth login' to auto-register SSH key after OAuth completes. + for _, sub := range authCmd.Commands() { + if sub.Name() == "login" { + origRunE := sub.RunE + sub.RunE = func(cmd *cobra.Command, args []string) error { + // Run the original auth login. + if err := origRunE(cmd, args); err != nil { + return err + } + + // Auto-register: generate lakebox SSH key and register it. 
+ fmt.Fprintln(cmd.ErrOrStderr(), "") + fmt.Fprintln(cmd.ErrOrStderr(), "Setting up SSH access...") + + keyPath, pubKey, err := lakebox.EnsureAndReadKey() + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), + "SSH key setup failed: %v\n"+ + "You can set it up later with: lakebox register\n", err) + return nil + } + fmt.Fprintf(cmd.ErrOrStderr(), "Using SSH key: %s\n", keyPath) + + host := cmd.Flag("host").Value.String() + if host == "" && len(args) > 0 { + host = args[0] + } + profile := cmd.Flag("profile").Value.String() + if profile == "" && host != "" { + // Derive profile name the same way auth login does. + h := strings.TrimPrefix(host, "https://") + h = strings.TrimPrefix(h, "http://") + profile = strings.SplitN(h, ".", 2)[0] + } + if profile != "" { + if err := lakebox.SetLastProfile(profile); err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "Warning: failed to save last profile: %v\n", err) + } + } + w, err := databricks.NewWorkspaceClient(&databricks.Config{ + Host: host, + Profile: profile, + }) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), + "Could not initialize workspace client for key registration: %v\n"+ + "Run 'lakebox register' to complete setup.\n", err) + return nil + } + + if err := lakebox.RegisterKey(cmd.Context(), w, pubKey); err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), + "Key registration failed: %v\n"+ + "Run 'lakebox register' to retry.\n", err) + return nil + } + + fmt.Fprintln(cmd.ErrOrStderr(), "SSH key registered. You're ready to use 'lakebox ssh'.") + return nil } + break } - - cli.AddCommand(cmd) - - // Built-in groups for the workspace commands. - groups := []cobra.Group{ - { - ID: mainGroup, - Title: "Available Commands", - }, - { - ID: pipelines.ManagementGroupID, - Title: "Management Commands", - }, - { - ID: permissionsGroup, - Title: "Permission Commands", - }, - } - - configureGroups(cmd, groups) } + cli.AddCommand(authCmd) - // Add other subcommands. 
- cli.AddCommand(api.New()) - cli.AddCommand(auth.New()) - cli.AddCommand(completion.New()) - cli.AddCommand(bundle.New()) - cli.AddCommand(cache.New()) - cli.AddCommand(experimental.New()) - cli.AddCommand(psql.New()) - cli.AddCommand(configure.New()) - cli.AddCommand(fs.New()) - cli.AddCommand(labs.New(ctx)) - cli.AddCommand(sync.New()) - cli.AddCommand(version.New()) - cli.AddCommand(selftest.New()) - cli.AddCommand(ssh.New()) - - // Add workspace command groups, filtering out empty groups or groups with only hidden commands. - configureGroups(cli, append(workspace.Groups(), cobra.Group{ - ID: "development", - Title: "Developer Tools", - })) + // Register lakebox subcommands directly at root level. + for _, sub := range lakebox.New().Commands() { + cli.AddCommand(sub) + } return cli } diff --git a/cmd/lakebox/api.go b/cmd/lakebox/api.go new file mode 100644 index 0000000000..acaeff47e8 --- /dev/null +++ b/cmd/lakebox/api.go @@ -0,0 +1,360 @@ +package lakebox + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/databricks/databricks-sdk-go" +) + +// Sandboxes live under the `/sandboxes` sub-collection of the lakebox service +// namespace (see `lakebox.proto` `LakeboxService.CreateSandbox`). +const lakeboxAPIPath = "/api/2.0/lakebox/sandboxes" + +// lakeboxAPI wraps raw HTTP calls to the lakebox REST API. +type lakeboxAPI struct { + w *databricks.WorkspaceClient +} + +// createRequest is the JSON body for POST /api/2.0/lakebox/sandboxes. +// +// The proto-defined `CreateSandboxRequest` carries a `Sandbox sandbox = 1` +// field today (every member is server-chosen), but JSON transcoding accepts +// the unwrapped form for forward-compatible callers. Keep `public_key` here +// as a no-op compat shim so older `lakebox create --public-key-file=...` +// invocations don't error — the manager ignores it on the wire. 
// createRequest is the JSON body for POST /api/2.0/lakebox/sandboxes.
//
// The proto-defined `CreateSandboxRequest` carries a `Sandbox sandbox = 1`
// field today (every member is server-chosen), but JSON transcoding accepts
// the unwrapped form for forward-compatible callers. Keep `public_key` here
// as a no-op compat shim so older `lakebox create --public-key-file=...`
// invocations don't error — the manager ignores it on the wire.
type createRequest struct {
	PublicKey string `json:"public_key,omitempty"`
}

// createResponse is the JSON body returned by POST /api/2.0/lakebox/sandboxes.
// Mirrors the `Sandbox` proto message after JSON transcoding.
type createResponse struct {
	SandboxID string `json:"sandboxId"`
	Status    string `json:"status"`
	FQDN      string `json:"fqdn"`
}

// sandboxEntry is a single item in the list response.
// Mirrors the `Sandbox` proto message after JSON transcoding.
//
// IdleTimeout and NoAutostop correspond to the proto's `optional` fields;
// they're pointers so we can tell "field absent on the wire" (server has
// the global default) from "explicitly set to 0 / false."
//
// `IdleTimeout` is a `google.protobuf.Duration`. Proto3 JSON canonical
// form serializes Duration as a string with an `s` suffix (e.g.
// `"900s"`), so the Go field is `*string` and we parse on read.
type sandboxEntry struct {
	SandboxID   string  `json:"sandboxId"`
	Status      string  `json:"status"`
	FQDN        string  `json:"fqdn"`
	IdleTimeout *string `json:"idleTimeout,omitempty"`
	NoAutostop  *bool   `json:"noAutostop,omitempty"`
}

// idleTimeoutSecs parses the proto3-canonical Duration string off
// `IdleTimeout` (e.g. `"900s"` → `900`). Returns 0 when unset, when the
// string is not a recognizable Duration, or when the Duration is
// negative (a negative idle timeout is meaningless to the watchdog).
// Sub-second precision is dropped — the watchdog only acts on whole
// seconds.
func (e *sandboxEntry) idleTimeoutSecs() int64 {
	if e.IdleTimeout == nil {
		return 0
	}
	s := *e.IdleTimeout
	if !strings.HasSuffix(s, "s") {
		return 0
	}
	d, err := time.ParseDuration(s)
	// Treat negative Durations like unparseable ones: the documented
	// contract is "0 when not a recognizable idle timeout".
	if err != nil || d < 0 {
		return 0
	}
	return int64(d.Seconds())
}

// defaultAutoStopSecs mirrors the manager's `watchdog_idle_grace_secs`
// fallback (10 minutes) used when a sandbox has no per-record override.
// The value is also documented in `lakebox/CLAUDE.md` ("Sandbox
// Watchdog" section). Hardcoded here so list/status can render the
// effective timeout without an extra round-trip to fetch manager config.
const defaultAutoStopSecs int64 = 600

// autoStopLabel renders the auto-stop policy advertised by the manager
// for one sandbox into a short human-readable string. Mirrors the wire
// semantics from `lakebox/proto/lakebox.proto`:
//   - `no_autostop == true` → never auto-stops
//   - `idle_timeout` set and positive → that many seconds
//   - otherwise → manager's global default (`defaultAutoStopSecs`)
func (e *sandboxEntry) autoStopLabel() string {
	if e.NoAutostop != nil && *e.NoAutostop {
		return "never"
	}
	if secs := e.idleTimeoutSecs(); secs > 0 {
		return formatDurationSecs(secs)
	}
	return formatDurationSecs(defaultAutoStopSecs)
}

// formatDurationSecs prints `secs` as a compact duration (e.g. `90s`,
// `15m`, `2h`, `1h30m`). Falls back to seconds if it's not a clean
// minute/hour multiple. Avoids pulling in a dependency just for this.
func formatDurationSecs(secs int64) string {
	if secs < 60 {
		return fmt.Sprintf("%ds", secs)
	}
	if secs%3600 == 0 {
		return fmt.Sprintf("%dh", secs/3600)
	}
	if secs >= 3600 {
		return fmt.Sprintf("%dh%dm", secs/3600, (secs%3600)/60)
	}
	if secs%60 == 0 {
		return fmt.Sprintf("%dm", secs/60)
	}
	return fmt.Sprintf("%ds", secs)
}

// listResponse is the JSON body returned by GET /api/2.0/lakebox/sandboxes.
type listResponse struct {
	Sandboxes []sandboxEntry `json:"sandboxes"`
}

// apiError is the error body returned by the lakebox API.
type apiError struct {
	ErrorCode string `json:"error_code"`
	Message   string `json:"message"`
}

func (e *apiError) Error() string {
	return fmt.Sprintf("%s: %s", e.ErrorCode, e.Message)
}
+func (a *lakeboxAPI) create(ctx context.Context, publicKey string) (*createResponse, error) { + body := createRequest{PublicKey: publicKey} + jsonBody, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + resp, err := a.doRequest(ctx, "POST", lakeboxAPIPath, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, parseAPIError(resp) + } + + var result createResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + return &result, nil +} + +// list calls GET /api/2.0/lakebox/sandboxes. +func (a *lakeboxAPI) list(ctx context.Context) ([]sandboxEntry, error) { + resp, err := a.doRequest(ctx, "GET", lakeboxAPIPath, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, parseAPIError(resp) + } + + var result listResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + return result.Sandboxes, nil +} + +// get calls GET /api/2.0/lakebox/sandboxes/{id}. +func (a *lakeboxAPI) get(ctx context.Context, id string) (*sandboxEntry, error) { + resp, err := a.doRequest(ctx, "GET", lakeboxAPIPath+"/"+id, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, parseAPIError(resp) + } + + var result sandboxEntry + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + return &result, nil +} + +// updateBody is the PATCH request body. 
The proto declares +// `UpdateSandboxRequest { Sandbox sandbox = 1 }` with `body: "sandbox"` +// in the (google.api.http) annotation, so the HTTP body is the inner +// `Sandbox` message directly — there is no `{"sandbox": {...}}` +// wrapping on the wire. +// +// Pointer fields encode the proto3 `optional` semantics — only the +// fields we explicitly set are emitted, leaving everything else +// server-untouched. `IdleTimeout` is a proto3-canonical Duration +// string (e.g. `"900s"`); the server-side wire type is +// `google.protobuf.Duration`. +type updateBody struct { + SandboxID string `json:"sandbox_id"` + IdleTimeout *string `json:"idle_timeout,omitempty"` + NoAutostop *bool `json:"no_autostop,omitempty"` +} + +// update calls PATCH /api/2.0/lakebox/sandboxes/{id} with whichever of +// `idle_timeout` / `no_autostop` the caller chose to set. Fields left +// nil are omitted from the wire payload, so the server preserves their +// current values. Returns the refreshed `sandboxEntry`. +func (a *lakeboxAPI) update( + ctx context.Context, + id string, + idleTimeoutSecs *int64, + noAutostop *bool, +) (*sandboxEntry, error) { + var idleTimeout *string + if idleTimeoutSecs != nil { + s := fmt.Sprintf("%ds", *idleTimeoutSecs) + idleTimeout = &s + } + body := updateBody{ + SandboxID: id, + IdleTimeout: idleTimeout, + NoAutostop: noAutostop, + } + jsonBody, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + resp, err := a.doRequest(ctx, "PATCH", lakeboxAPIPath+"/"+id, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, parseAPIError(resp) + } + + var result sandboxEntry + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + return &result, nil +} + +// delete calls DELETE /api/2.0/lakebox/sandboxes/{id}. 
+func (a *lakeboxAPI) delete(ctx context.Context, id string) error { + resp, err := a.doRequest(ctx, "DELETE", lakeboxAPIPath+"/"+id, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return parseAPIError(resp) + } + return nil +} + +// doRequest makes an authenticated HTTP request to the workspace. +func (a *lakeboxAPI) doRequest(ctx context.Context, method, path string, body io.Reader) (*http.Response, error) { + // The configured host may be just a hostname or may carry a workspace + // selector in the query (e.g. `https://dogfood.staging.databricks.com/?o=...`). + // Parse it so we can append the API path while preserving the query, and so + // we can pull the workspace ID out of `?o=` when the SDK config doesn't + // carry it on a separate `workspace_id` field. + parsed, err := url.Parse(a.w.Config.Host) + if err != nil { + return nil, fmt.Errorf("failed to parse host %q: %w", a.w.Config.Host, err) + } + wsid := a.w.Config.WorkspaceID + if wsid == "" { + if v := parsed.Query().Get("o"); v != "" { + wsid = v + } + } + parsed.Path = strings.TrimRight(parsed.Path, "/") + path + + req, err := http.NewRequestWithContext(ctx, method, parsed.String(), body) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + if err := a.w.Config.Authenticate(req); err != nil { + return nil, fmt.Errorf("failed to authenticate: %w", err) + } + + // Multi-workspace gateways (e.g. dogfood.staging.databricks.com) need a + // workspace selector to route the request — without it the gateway can't + // scope the credential and rejects with "Credential was not sent or was of + // an unsupported type for this API". `?o=` in the URL works as a + // fallback, but the explicit header is the well-defined contract. 
+ if wsid != "" { + req.Header.Set("X-Databricks-Org-Id", wsid) + } + + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + return http.DefaultClient.Do(req) +} + +func parseAPIError(resp *http.Response) error { + body, _ := io.ReadAll(resp.Body) + var apiErr apiError + if json.Unmarshal(body, &apiErr) == nil && apiErr.Message != "" { + return &apiErr + } + return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) +} + +// SSH keys are now nested under the lakebox service namespace alongside +// `sandboxes/` (see `LakeboxService.CreateSshKey`). +const lakeboxKeysAPIPath = "/api/2.0/lakebox/ssh-keys" + +// registerKeyRequest is the JSON body for POST /api/2.0/lakebox/ssh-keys. +type registerKeyRequest struct { + PublicKey string `json:"public_key"` + Name string `json:"name,omitempty"` +} + +// registerKey calls POST /api/2.0/lakebox/ssh-keys. +func (a *lakeboxAPI) registerKey(ctx context.Context, publicKey string) error { + body := registerKeyRequest{PublicKey: publicKey} + jsonBody, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("failed to marshal request: %w", err) + } + + resp, err := a.doRequest(ctx, "POST", lakeboxKeysAPIPath, bytes.NewReader(jsonBody)) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return parseAPIError(resp) + } + return nil +} diff --git a/cmd/lakebox/config.go b/cmd/lakebox/config.go new file mode 100644 index 0000000000..fe3b80ddf2 --- /dev/null +++ b/cmd/lakebox/config.go @@ -0,0 +1,130 @@ +package lakebox + +import ( + "fmt" + "time" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +// MIN_IDLE_TIMEOUT_SECS / MAX_IDLE_TIMEOUT_SECS mirror the manager-side +// constants in lakebox/src/api/handlers/sandbox.rs. Pre-flighting client-side +// gives a clearer error than waiting for the server's INVALID_ARGUMENT. 
+const ( + minIdleTimeoutSecs = 60 + maxIdleTimeoutSecs = 86_400 +) + +func newConfigCommand() *cobra.Command { + var idleTimeoutFlag string + var noAutostopFlag bool + + cmd := &cobra.Command{ + Use: "config ", + Short: "Configure a Lakebox's auto-stop policy", + Long: `Configure a Lakebox's auto-stop policy. + +Two knobs are independent — pass either or both: + + --idle-timeout Per-sandbox idle timeout. The watchdog reaps + the sandbox after this much idle time. Pass + 0 (or 0s) to clear and revert to the manager's + global default (10m). Valid range when set: + 60s to 24h. + + --no-autostop[=true|false] When true, the sandbox is exempt from + idle-driven auto-stop entirely. The + --idle-timeout setting is ignored while + this is on. Setting --idle-timeout to a + non-zero value in a later call clears + --no-autostop automatically. Sandbox still + stops on explicit 'lakebox delete'. + +Examples: + lakebox config happy-panda-1234 --idle-timeout 15m + lakebox config happy-panda-1234 --idle-timeout 1h30m + lakebox config happy-panda-1234 --idle-timeout 0 # clear, use default + lakebox config happy-panda-1234 --no-autostop # never auto-stop + lakebox config happy-panda-1234 --no-autostop=false # back to timeout path + lakebox config happy-panda-1234 --idle-timeout 30m --no-autostop=false`, + Args: cobra.ExactArgs(1), + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api := newLakeboxAPI(w) + out := cmd.OutOrStdout() + + id := args[0] + + // Translate flag presence + value into the proto3 + // optional-field semantics the server expects. 
+ var idleSecs *int64 + if cmd.Flags().Changed("idle-timeout") { + secs, err := parseIdleTimeoutFlag(idleTimeoutFlag) + if err != nil { + return err + } + idleSecs = &secs + } + + var noAutostop *bool + if cmd.Flags().Changed("no-autostop") { + p := noAutostopFlag + noAutostop = &p + } + + if idleSecs == nil && noAutostop == nil { + return fmt.Errorf("nothing to update — pass --idle-timeout and/or --no-autostop") + } + + updated, err := api.update(ctx, id, idleSecs, noAutostop) + if err != nil { + return fmt.Errorf("failed to update lakebox %s: %w", id, err) + } + + blank(out) + field(out, "id", bold(updated.SandboxID)) + field(out, "autostop", dim(updated.autoStopLabel())) + blank(out) + return nil + }, + } + + cmd.Flags().StringVar(&idleTimeoutFlag, "idle-timeout", "", + "Idle timeout (e.g. 15m, 1h30m, 90s). Pass 0 to clear and revert to the manager's default.") + cmd.Flags().BoolVar(&noAutostopFlag, "no-autostop", false, + "When true, this sandbox never auto-stops on idle. Pass --no-autostop=false to revert.") + + return cmd +} + +// parseIdleTimeoutFlag accepts the same syntax as time.ParseDuration plus +// the special-case "0" / "0s" → clear. Anything else outside the +// [60s, 86400s] window is rejected client-side. +func parseIdleTimeoutFlag(raw string) (int64, error) { + d, err := time.ParseDuration(raw) + if err != nil { + // Allow bare integer seconds as a convenience (`--idle-timeout 900`). + var secs int64 + if _, e2 := fmt.Sscanf(raw, "%d", &secs); e2 == nil { + return checkIdleSecs(secs) + } + return 0, fmt.Errorf("invalid --idle-timeout %q: %w (use Go duration syntax, e.g. 
15m, 1h30m)", raw, err) + } + return checkIdleSecs(int64(d.Seconds())) +} + +func checkIdleSecs(secs int64) (int64, error) { + if secs == 0 { + return 0, nil // clear / revert to global default + } + if secs < minIdleTimeoutSecs || secs > maxIdleTimeoutSecs { + return 0, fmt.Errorf( + "idle-timeout must be 0 (clear) or between %ds and %ds, got %ds", + minIdleTimeoutSecs, maxIdleTimeoutSecs, secs, + ) + } + return secs, nil +} diff --git a/cmd/lakebox/create.go b/cmd/lakebox/create.go new file mode 100644 index 0000000000..096df26ce6 --- /dev/null +++ b/cmd/lakebox/create.go @@ -0,0 +1,79 @@ +package lakebox + +import ( + "fmt" + "os" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +func newCreateCommand() *cobra.Command { + var publicKeyFile string + + cmd := &cobra.Command{ + Use: "create", + Short: "Create a new Lakebox environment", + Long: `Create a new Lakebox environment. + +Creates a new personal development environment backed by a microVM. +Blocks until the lakebox is running and prints the lakebox ID. 
+ +Example: + lakebox create`, + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api := newLakeboxAPI(w) + stderr := cmd.ErrOrStderr() + + var publicKey string + if publicKeyFile != "" { + data, err := os.ReadFile(publicKeyFile) + if err != nil { + return fmt.Errorf("failed to read public key file %s: %w", publicKeyFile, err) + } + publicKey = string(data) + } + + s := spin(stderr, "Provisioning your lakebox…") + + result, err := api.create(ctx, publicKey) + if err != nil { + s.fail("Failed to create lakebox") + return fmt.Errorf("failed to create lakebox: %w", err) + } + + s.ok(fmt.Sprintf("Lakebox %s is %s", bold(result.SandboxID), status(result.Status))) + + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + + currentDefault := getDefault(profile) + shouldSetDefault := currentDefault == "" + if !shouldSetDefault && currentDefault != "" { + if _, err := api.get(ctx, currentDefault); err != nil { + shouldSetDefault = true + } + } + if shouldSetDefault { + if err := setDefault(profile, result.SandboxID); err != nil { + warn(stderr, fmt.Sprintf("Could not save default: %v", err)) + } else { + field(stderr, "default", result.SandboxID) + } + } + + blank(stderr) + fmt.Fprintln(cmd.OutOrStdout(), result.SandboxID) + return nil + }, + } + + cmd.Flags().StringVar(&publicKeyFile, "public-key-file", "", "Path to SSH public key file to install in the lakebox") + + return cmd +} diff --git a/cmd/lakebox/default.go b/cmd/lakebox/default.go new file mode 100644 index 0000000000..b632c5984a --- /dev/null +++ b/cmd/lakebox/default.go @@ -0,0 +1,38 @@ +package lakebox + +import ( + "fmt" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +func newSetDefaultCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "set-default ", + Short: "Set the default Lakebox for SSH", + Long: `Set the default Lakebox that 'databricks lakebox 
ssh' connects to. + +The default is stored locally in ~/.databricks/lakebox.json per profile. + +Example: + databricks lakebox set-default happy-panda-1234`, + Args: cobra.ExactArgs(1), + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + w := cmdctx.WorkspaceClient(cmd.Context()) + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + + lakeboxID := args[0] + if err := setDefault(profile, lakeboxID); err != nil { + return fmt.Errorf("failed to set default: %w", err) + } + fmt.Fprintf(cmd.OutOrStdout(), "Default lakebox set to: %s\n", lakeboxID) + return nil + }, + } + return cmd +} diff --git a/cmd/lakebox/delete.go b/cmd/lakebox/delete.go new file mode 100644 index 0000000000..ba56e2a508 --- /dev/null +++ b/cmd/lakebox/delete.go @@ -0,0 +1,51 @@ +package lakebox + +import ( + "fmt" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +func newDeleteCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "delete ", + Short: "Delete a Lakebox environment", + Long: `Delete a Lakebox environment. + +Permanently terminates and removes the specified lakebox. 
+ +Example: + lakebox delete happy-panda-1234`, + Args: cobra.ExactArgs(1), + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api := newLakeboxAPI(w) + stderr := cmd.ErrOrStderr() + + lakeboxID := args[0] + s := spin(stderr, fmt.Sprintf("Removing %s…", lakeboxID)) + + if err := api.delete(ctx, lakeboxID); err != nil { + s.fail(fmt.Sprintf("Failed to delete %s", lakeboxID)) + return fmt.Errorf("failed to delete lakebox %s: %w", lakeboxID, err) + } + + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + if getDefault(profile) == lakeboxID { + _ = clearDefault(profile) + s.ok(fmt.Sprintf("Removed %s %s", bold(lakeboxID), dim("(default cleared)"))) + } else { + s.ok(fmt.Sprintf("Removed %s", bold(lakeboxID))) + } + return nil + }, + } + + return cmd +} diff --git a/cmd/lakebox/exec_unix.go b/cmd/lakebox/exec_unix.go new file mode 100644 index 0000000000..d47f629572 --- /dev/null +++ b/cmd/lakebox/exec_unix.go @@ -0,0 +1,13 @@ +//go:build !windows + +package lakebox + +import ( + "os" + "syscall" +) + +// execSyscall replaces the current process with the given command (Unix only). +func execSyscall(path string, args []string) error { + return syscall.Exec(path, args, os.Environ()) +} diff --git a/cmd/lakebox/lakebox.go b/cmd/lakebox/lakebox.go new file mode 100644 index 0000000000..25a9b479e5 --- /dev/null +++ b/cmd/lakebox/lakebox.go @@ -0,0 +1,57 @@ +package lakebox + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/spf13/cobra" +) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "lakebox", + Short: "Manage Databricks Lakebox environments", + Long: `Manage Databricks Lakebox environments. + +Lakebox provides SSH-accessible development environments backed by +microVM isolation. Each lakebox is a personal sandbox with pre-installed +tooling (Python, Node.js, Rust, Databricks CLI) and persistent storage. 
+ +Getting started: + lakebox auth login --host https://... # authenticate to Databricks workspace and lakebox service + lakebox ssh # SSH to your default lakebox + +Common workflows: + lakebox ssh # SSH to your default lakebox + lakebox ssh my-project # SSH to a named lakebox + lakebox list # list your lakeboxes + lakebox create # create a new lakebox + lakebox delete my-project # delete a lakebox + lakebox status my-project # show lakebox status + +The CLI manages your ~/.ssh/config so you can also connect directly: + ssh my-project # after 'lakebox ssh' +`, + } + + cmd.AddCommand(newRegisterCommand()) + cmd.AddCommand(newSetDefaultCommand()) + cmd.AddCommand(newSSHCommand()) + cmd.AddCommand(newListCommand()) + cmd.AddCommand(newCreateCommand()) + cmd.AddCommand(newDeleteCommand()) + cmd.AddCommand(newStatusCommand()) + cmd.AddCommand(newConfigCommand()) + + return cmd +} + +// mustWorkspaceClient applies the saved last-login profile when the user +// hasn't explicitly set --profile, then delegates to root.MustWorkspaceClient. +func mustWorkspaceClient(cmd *cobra.Command, args []string) error { + profileFlag := cmd.Flag("profile") + if profileFlag != nil && !profileFlag.Changed { + if last := GetLastProfile(); last != "" { + _ = profileFlag.Value.Set(last) + } + } + return root.MustWorkspaceClient(cmd, args) +} diff --git a/cmd/lakebox/list.go b/cmd/lakebox/list.go new file mode 100644 index 0000000000..f058524e7e --- /dev/null +++ b/cmd/lakebox/list.go @@ -0,0 +1,116 @@ +package lakebox + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +func newListCommand() *cobra.Command { + var outputJSON bool + + cmd := &cobra.Command{ + Use: "list", + Short: "List your Lakebox environments", + Long: `List your Lakebox environments. + +Shows all lakeboxes associated with your account, including their +current status and ID. 
+ +Example: + lakebox list + lakebox list --json`, + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api := newLakeboxAPI(w) + + entries, err := api.list(ctx) + if err != nil { + return fmt.Errorf("failed to list lakeboxes: %w", err) + } + + if outputJSON { + enc := json.NewEncoder(cmd.OutOrStdout()) + enc.SetIndent("", " ") + return enc.Encode(entries) + } + + if len(entries) == 0 { + fmt.Fprintf(cmd.ErrOrStderr(), " %sNo lakeboxes found.%s\n", dm, rs) + return nil + } + + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + defaultID := getDefault(profile) + + out := cmd.OutOrStdout() + + // Compute column widths. AUTOSTOP holds short tokens like + // `default`, `never`, `15m`, `1h30m` — 8 chars covers them. + col := 10 + autostopCol := 8 + for _, e := range entries { + if l := len(e.SandboxID); l > col { + col = l + } + if l := len(e.autoStopLabel()); l > autostopCol { + autostopCol = l + } + } + col += 2 + autostopCol += 2 + + blank(out) + fmt.Fprintf(out, " %s%-*s %-10s %-*s %s%s\n", + dm, col, "ID", "STATUS", autostopCol, "AUTOSTOP", "DEFAULT", rs) + fmt.Fprintf(out, " %s%s%s\n", dm, strings.Repeat("─", col+10+autostopCol+12), rs) + + for _, e := range entries { + id := e.SandboxID + def := "" + if id == defaultID { + def = accent("*") + } + // Pad ID manually to avoid ANSI codes breaking alignment. + idPad := col - len(id) + if idPad < 0 { + idPad = 0 + } + st := status(e.Status) + // Pad status to 10 visible chars. 
+ stPad := 10 - len(e.Status) + if stPad < 0 { + stPad = 0 + } + as := e.autoStopLabel() + asPad := autostopCol - len(as) + if asPad < 0 { + asPad = 0 + } + idStr := bold(id) + if strings.EqualFold(e.Status, "running") { + idStr = cyan + bo + id + rs + } + fmt.Fprintf(out, " %s%s %s%s %s%s %s\n", + idStr, strings.Repeat(" ", idPad), + st, strings.Repeat(" ", stPad), + dim(as), strings.Repeat(" ", asPad), + def) + } + blank(out) + return nil + }, + } + + cmd.Flags().BoolVar(&outputJSON, "json", false, "Output as JSON") + + return cmd +} diff --git a/cmd/lakebox/register.go b/cmd/lakebox/register.go new file mode 100644 index 0000000000..f3550d8e5d --- /dev/null +++ b/cmd/lakebox/register.go @@ -0,0 +1,137 @@ +package lakebox + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/databricks-sdk-go" + "github.com/spf13/cobra" +) + +const lakeboxKeyName = "lakebox_rsa" + +func newRegisterCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "register", + Short: "Register this machine for lakebox SSH access", + Long: `Generate a dedicated SSH key for lakebox and register it with the service. + +This command: +1. Generates an RSA SSH key at ~/.ssh/lakebox_rsa (if it doesn't exist) +2. Registers the public key with the lakebox service + +After registration, 'lakebox ssh' will use this key automatically. +Run this once per machine. 
+ +Example: + lakebox register`, + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api := newLakeboxAPI(w) + + keyPath, generated, err := ensureLakeboxKey() + if err != nil { + return fmt.Errorf("failed to ensure lakebox SSH key: %w", err) + } + + stderr := cmd.ErrOrStderr() + if generated { + ok(stderr, fmt.Sprintf("Generated SSH key at %s", dim(keyPath))) + } else { + field(stderr, "key", keyPath) + } + + pubKeyData, err := os.ReadFile(keyPath + ".pub") + if err != nil { + return fmt.Errorf("failed to read public key %s.pub: %w", keyPath, err) + } + + s := spin(stderr, "Registering key…") + if err := api.registerKey(ctx, string(pubKeyData)); err != nil { + s.fail("Failed to register key") + return fmt.Errorf("failed to register key: %w", err) + } + s.ok("SSH key registered") + + blank(stderr) + fmt.Fprintf(stderr, " Run %s to connect.\n\n", bold("lakebox ssh")) + return nil + }, + } + + return cmd +} + +// lakeboxKeyPath returns the path to the dedicated lakebox SSH key. +func lakeboxKeyPath() (string, error) { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(homeDir, ".ssh", lakeboxKeyName), nil +} + +// ensureLakeboxKey returns the path to the lakebox SSH key, generating it if +// it doesn't exist. Returns (path, wasGenerated, error). +func ensureLakeboxKey() (string, bool, error) { + keyPath, err := lakeboxKeyPath() + if err != nil { + return "", false, err + } + + if _, err := os.Stat(keyPath); err == nil { + return keyPath, false, nil + } + + // Check that ssh-keygen is available before trying to generate. 
+ if _, err := exec.LookPath("ssh-keygen"); err != nil { + return "", false, fmt.Errorf( + "ssh-keygen not found in PATH.\n" + + "Please install OpenSSH and run 'lakebox register' again.\n" + + " macOS: brew install openssh\n" + + " Ubuntu: sudo apt install openssh-client\n" + + " Windows: install Git for Windows (includes ssh-keygen)") + } + + sshDir := filepath.Dir(keyPath) + if err := os.MkdirAll(sshDir, 0700); err != nil { + return "", false, fmt.Errorf("failed to create %s: %w", sshDir, err) + } + + genCmd := exec.Command("ssh-keygen", "-t", "rsa", "-b", "4096", "-f", keyPath, "-N", "", "-q", "-C", "lakebox") + genCmd.Stdin = os.Stdin + genCmd.Stdout = os.Stderr + genCmd.Stderr = os.Stderr + if err := genCmd.Run(); err != nil { + return "", false, fmt.Errorf("ssh-keygen failed: %w", err) + } + + return keyPath, true, nil +} + +// EnsureAndReadKey generates the lakebox SSH key if needed and returns +// (keyPath, publicKeyContent, error). Exported for use by the auth login hook. +func EnsureAndReadKey() (string, string, error) { + keyPath, _, err := ensureLakeboxKey() + if err != nil { + return "", "", err + } + pubKeyData, err := os.ReadFile(keyPath + ".pub") + if err != nil { + return "", "", fmt.Errorf("failed to read public key %s.pub: %w", keyPath, err) + } + return keyPath, string(pubKeyData), nil +} + +// RegisterKey registers a public key with the lakebox API. Exported for use +// by the auth login hook. 
+func RegisterKey(ctx context.Context, w *databricks.WorkspaceClient, pubKey string) error { + api := newLakeboxAPI(w) + return api.registerKey(ctx, pubKey) +} diff --git a/cmd/lakebox/ssh.go b/cmd/lakebox/ssh.go new file mode 100644 index 0000000000..2a7db87a1b --- /dev/null +++ b/cmd/lakebox/ssh.go @@ -0,0 +1,160 @@ +package lakebox + +import ( + "fmt" + "os" + "os/exec" + "runtime" + "strings" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +const ( + defaultGatewayHost = "uw2.dbrx.dev" + stagingDefaultGatewayHost = "uw2.s.dbrx.dev" + defaultGatewayPort = "2222" +) + +// resolveGatewayHost picks the SSH gateway hostname based on the workspace host. +// Staging workspaces (*.staging.cloud.databricks.com etc.) route through +// uw2.s.dbrx.dev; everything else uses prod uw2.dbrx.dev. +func resolveGatewayHost(workspaceHost string) string { + if strings.Contains(workspaceHost, ".staging.") { + return stagingDefaultGatewayHost + } + return defaultGatewayHost +} + +func newSSHCommand() *cobra.Command { + var gatewayHost string + var gatewayPort string + + cmd := &cobra.Command{ + Use: "ssh [lakebox-id] [-- ...]", + Short: "SSH into a Lakebox environment", + Long: `SSH into a Lakebox environment. + +Connect to your default or a named lakebox via SSH. Extra arguments +after -- are passed directly to the ssh process. This lets you run +remote commands, set up port forwarding, or pass any other ssh flags. 
+
+Examples:
+  lakebox ssh                                           # interactive shell on default lakebox
+  lakebox ssh happy-panda-1234                          # interactive shell on specific lakebox
+  lakebox ssh -- ls -la /home                           # run command on default lakebox
+  lakebox ssh happy-panda-1234 -- cat /etc/os-release   # run command on specific lakebox
+  lakebox ssh -- -L 8080:localhost:8080                 # port forwarding on default lakebox`,
+		Args:    cobra.ArbitraryArgs,
+		PreRunE: mustWorkspaceClient,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			w := cmdctx.WorkspaceClient(ctx)
+
+			profile := w.Config.Profile
+			if profile == "" {
+				profile = w.Config.Host
+			}
+
+			// Use the dedicated lakebox SSH key.
+			keyPath, err := lakeboxKeyPath()
+			if err != nil {
+				return fmt.Errorf("failed to determine lakebox key path: %w", err)
+			}
+			if _, err := os.Stat(keyPath); os.IsNotExist(err) {
+				return fmt.Errorf("lakebox SSH key not found at %s — run 'lakebox register' first", keyPath)
+			}
+			stderr := cmd.ErrOrStderr()
+
+			// Parse args: at most one optional lakebox ID may appear before --;
+			// everything after -- is passed through to the ssh process verbatim.
+			var lakeboxID string
+			var extraArgs []string
+
+			// ArgsLenAtDash is the count of positional args before --, or -1 if no --.
+			if dashAt := cmd.ArgsLenAtDash(); dashAt >= 0 {
+				extraArgs = args[dashAt:]
+				args = args[:dashAt]
+			}
+			switch {
+			case len(args) > 1:
+				// Extra IDs used to be dropped silently; surface a usage error instead.
+				return fmt.Errorf("accepts at most one lakebox ID before --, got %d", len(args))
+			case len(args) == 1:
+				lakeboxID = args[0]
+			}
+
+			// Determine lakebox ID if not explicit.
+			if lakeboxID == "" {
+				if def := getDefault(profile); def != "" {
+					lakeboxID = def
+				} else {
+					api := newLakeboxAPI(w)
+					pubKeyData, err := os.ReadFile(keyPath + ".pub")
+					if err != nil {
+						return fmt.Errorf("failed to read public key %s.pub: %w", keyPath, err)
+					}
+
+					s := spin(stderr, "Provisioning your lakebox…")
+					result, err := api.create(ctx, string(pubKeyData))
+					if err != nil {
+						s.fail("Failed to create lakebox")
+						return fmt.Errorf("failed to create lakebox: %w", err)
+					}
+					lakeboxID = result.SandboxID
+					s.ok(fmt.Sprintf("Lakebox %s ready", bold(lakeboxID)))
+
+					if err := setDefault(profile, lakeboxID); err != nil {
+						warn(stderr, fmt.Sprintf("Could not save default: %v", err))
+					}
+				}
+			}
+
+			host := gatewayHost // explicit --gateway wins over auto-detection
+			if host == "" {
+				host = resolveGatewayHost(w.Config.Host)
+			}
+
+			// ssh reports its own outcome; announce the target rather than printing
+			// a "✓ Connected" success marker before any connection is attempted.
+			fmt.Fprintf(stderr, " %s→%s Connecting to %s…\n", dm, rs, bold(lakeboxID))
+			return execSSHDirect(lakeboxID, host, gatewayPort, keyPath, extraArgs)
+		},
+	}
+
+	cmd.Flags().StringVar(&gatewayHost, "gateway", "", "Lakebox gateway hostname (auto-detected from profile if empty)")
+	cmd.Flags().StringVar(&gatewayPort, "port", defaultGatewayPort, "Lakebox gateway SSH port")
+
+	return cmd
+}
+
+// execSSHDirect execs into ssh with all options passed as args (no ~/.ssh/config needed).
+// Extra args are appended after the destination (for remote commands or ssh flags).
+func execSSHDirect(lakeboxID, host, port, keyPath string, extraArgs []string) error {
+	sshPath, err := exec.LookPath("ssh")
+	if err != nil {
+		return fmt.Errorf("ssh not found in PATH: %w", err)
+	}
+
+	args := []string{
+		"ssh",
+		"-i", keyPath,
+		"-p", port,
+		"-o", "IdentitiesOnly=yes",
+		"-o", "PreferredAuthentications=publickey",
+		"-o", "StrictHostKeyChecking=no", // NOTE(review): gateway host key is never verified (MITM-able); consider pinning it
+		"-o", "UserKnownHostsFile=/dev/null",
+		"-o", "LogLevel=ERROR",
+		fmt.Sprintf("%s@%s", lakeboxID, host),
+	}
+	args = append(args, extraArgs...)
+ + if runtime.GOOS == "windows" { + cmd := exec.Command(sshPath, args[1:]...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() + } + + return execSyscall(sshPath, args) +} diff --git a/cmd/lakebox/state.go b/cmd/lakebox/state.go new file mode 100644 index 0000000000..b84b5b16e1 --- /dev/null +++ b/cmd/lakebox/state.go @@ -0,0 +1,111 @@ +package lakebox + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" +) + +// stateFile stores per-profile lakebox defaults on the local filesystem. +// Located at ~/.databricks/lakebox.json. +type stateFile struct { + // Profile name → default lakebox ID. + Defaults map[string]string `json:"defaults"` + // Last profile used with 'lakebox auth login'. + LastProfile string `json:"last_profile,omitempty"` +} + +func stateFilePath() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".databricks", "lakebox.json"), nil +} + +func loadState() (*stateFile, error) { + path, err := stateFilePath() + if err != nil { + return nil, err + } + + data, err := os.ReadFile(path) + if os.IsNotExist(err) { + return &stateFile{Defaults: make(map[string]string)}, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", path, err) + } + + var state stateFile + if err := json.Unmarshal(data, &state); err != nil { + return &stateFile{Defaults: make(map[string]string)}, nil + } + if state.Defaults == nil { + state.Defaults = make(map[string]string) + } + return &state, nil +} + +func saveState(state *stateFile) error { + path, err := stateFilePath() + if err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return err + } + + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + return os.WriteFile(path, data, 0600) +} + +func getDefault(profile string) string { + state, err := loadState() + if err != nil { + return "" + } + return 
state.Defaults[profile] +} + +func setDefault(profile, lakeboxID string) error { + state, err := loadState() + if err != nil { + return err + } + state.Defaults[profile] = lakeboxID + return saveState(state) +} + +// GetLastProfile returns the profile saved by the most recent 'lakebox auth login'. +func GetLastProfile() string { + state, err := loadState() + if err != nil { + return "" + } + return state.LastProfile +} + +// SetLastProfile persists the profile used during 'lakebox auth login'. +func SetLastProfile(profile string) error { + state, err := loadState() + if err != nil { + return err + } + state.LastProfile = profile + return saveState(state) +} + +func clearDefault(profile string) error { + state, err := loadState() + if err != nil { + return err + } + delete(state.Defaults, profile) + return saveState(state) +} diff --git a/cmd/lakebox/status.go b/cmd/lakebox/status.go new file mode 100644 index 0000000000..f5df1ee4a4 --- /dev/null +++ b/cmd/lakebox/status.go @@ -0,0 +1,58 @@ +package lakebox + +import ( + "encoding/json" + "fmt" + + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +func newStatusCommand() *cobra.Command { + var outputJSON bool + + cmd := &cobra.Command{ + Use: "status ", + Short: "Show Lakebox environment status", + Long: `Show detailed status of a Lakebox environment. 
+ +Example: + lakebox status happy-panda-1234 + lakebox status happy-panda-1234 --json`, + Args: cobra.ExactArgs(1), + PreRunE: mustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api := newLakeboxAPI(w) + + lakeboxID := args[0] + + entry, err := api.get(ctx, lakeboxID) + if err != nil { + return fmt.Errorf("failed to get lakebox %s: %w", lakeboxID, err) + } + + if outputJSON { + enc := json.NewEncoder(cmd.OutOrStdout()) + enc.SetIndent("", " ") + return enc.Encode(entry) + } + + out := cmd.OutOrStdout() + blank(out) + field(out, "id", bold(entry.SandboxID)) + field(out, "status", status(entry.Status)) + if entry.FQDN != "" { + field(out, "fqdn", dim(entry.FQDN)) + } + field(out, "autostop", dim(entry.autoStopLabel())) + blank(out) + return nil + }, + } + + cmd.Flags().BoolVar(&outputJSON, "json", false, "Output as JSON") + + return cmd +} diff --git a/cmd/lakebox/ui.go b/cmd/lakebox/ui.go new file mode 100644 index 0000000000..2eab33310c --- /dev/null +++ b/cmd/lakebox/ui.go @@ -0,0 +1,141 @@ +package lakebox + +import ( + "fmt" + "io" + "os" + "strings" + "sync" + "time" +) + +// Single accent color throughout. Bold for emphasis. Dim for metadata. +const ( + rs = "\033[0m" // reset + bo = "\033[1m" // bold + dm = "\033[2m" // dim + cyan = "\033[36m" // accent +) + +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + fi, err := f.Stat() + if err != nil { + return false + } + return fi.Mode()&os.ModeCharDevice != 0 + } + return false +} + +// spinner shows a braille spinner like Claude Code. 
+type spinner struct {
+	w       io.Writer
+	msg     string
+	done    chan struct{} // closed by finish() to tell run() to stop
+	stopped chan struct{} // closed when run() has exited (or was never started)
+	once    sync.Once
+	started time.Time
+}
+
+func spin(w io.Writer, msg string) *spinner {
+	s := &spinner{w: w, msg: msg, done: make(chan struct{}), stopped: make(chan struct{}), started: time.Now()}
+	if isTTY(w) {
+		go s.run()
+	} else {
+		close(s.stopped) // no render goroutine — finish() must not block waiting for it
+		fmt.Fprintf(w, "* %s\n", msg)
+	}
+	return s
+}
+
+func (s *spinner) run() {
+	defer close(s.stopped) // lets finish() join this goroutine before printing
+	frames := []string{"⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"}
+	i := 0
+	ticker := time.NewTicker(80 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-s.done:
+			return
+		case <-ticker.C:
+			elapsed := time.Since(s.started).Truncate(time.Second)
+			fmt.Fprintf(s.w, "\r %s%s%s %s%s%s %s(%s)%s ",
+				cyan, frames[i%len(frames)], rs,
+				bo, s.msg, rs,
+				dm, elapsed, rs)
+			i++
+		}
+	}
+}
+
+// finish stops the spinner exactly once: it signals run() to exit, waits for it
+// (so a half-drawn frame can't interleave with the final line), then prints.
+func (s *spinner) finish(mark, msg string) {
+	s.once.Do(func() {
+		close(s.done)
+		<-s.stopped
+		if isTTY(s.w) {
+			fmt.Fprintf(s.w, "\r\033[K %s%s%s %s\n", cyan, mark, rs, msg)
+		} else {
+			fmt.Fprintf(s.w, "%s %s\n", mark, msg)
+		}
+	})
+}
+
+// ok and fail print the final ✓ / ✗ line and release the terminal line.
+func (s *spinner) ok(msg string)   { s.finish("✓", msg) }
+
+func (s *spinner) fail(msg string) { s.finish("✗", msg) }
+
+// --- Consistent output primitives ---
+
+// status formats a status string with the accent color.
+func status(s string) string {
+	switch strings.ToLower(s) {
+	case "running":
+		return cyan + "running" + rs
+	case "stopped":
+		return dm + "stopped" + rs
+	case "creating":
+		return cyan + bo + "creating" + rs // no "…": list pads by len(raw status), the extra rune broke column alignment
+	default:
+		return dm + strings.ToLower(s) + rs
+	}
+}
+
+// field prints " label value"
+func field(w io.Writer, label, value string) {
+	fmt.Fprintf(w, " %s%-10s%s %s\n", dm, label, rs, value)
+}
+
+// ok prints " ✓ message"
+func ok(w io.Writer, msg string) {
+	fmt.Fprintf(w, " %s✓%s %s\n", cyan, rs, msg)
+}
+
+// warn prints " ! message"
+func warn(w io.Writer, msg string) {
+	fmt.Fprintf(w, " %s!%s %s\n", cyan, rs, msg)
+}
+
+// blank prints an empty line.
+func blank(w io.Writer) { + fmt.Fprintln(w) +} + +// accent wraps text in the accent color. +func accent(s string) string { + return cyan + s + rs +} + +// bold wraps text in bold. +func bold(s string) string { + return bo + s + rs +} + +// dim wraps text in dim. +func dim(s string) string { + return dm + s + rs +} diff --git a/install.sh b/install.sh new file mode 100755 index 0000000000..acdf259b4c --- /dev/null +++ b/install.sh @@ -0,0 +1,80 @@ +#!/bin/sh +# Lakebox CLI installer — . <(curl -s devbox.dbrx.dev) + +_lakebox_install() { + INSTALL_DIR="$HOME/.lakebox/bin" + REMOTE_NAME="databricks" + LOCAL_NAME="lakebox" + BASE_URL="https://devbox.dbrx.dev" + + case "$(uname -s)" in + Linux*) OS="linux" ;; + Darwin*) OS="darwin" ;; + *) printf "error: unsupported OS: %s\n" "$(uname -s)" >&2; return 1 ;; + esac + + case "$(uname -m)" in + x86_64|amd64) ARCH="amd64" ;; + aarch64|arm64) ARCH="arm64" ;; + *) printf "error: unsupported arch: %s\n" "$(uname -m)" >&2; return 1 ;; + esac + + url="${BASE_URL}/${REMOTE_NAME}-${OS}-${ARCH}" + + printf "📦 Installing Lakebox CLI (%s/%s)...\n" "$OS" "$ARCH" + + mkdir -p "$INSTALL_DIR" || { printf "error: could not create %s\n" "$INSTALL_DIR" >&2; return 1; } + + if command -v curl >/dev/null 2>&1; then + curl -fSL --progress-bar "$url" -o "$INSTALL_DIR/$LOCAL_NAME" || { printf "error: download failed\n" >&2; return 1; } + elif command -v wget >/dev/null 2>&1; then + wget -q --show-progress "$url" -O "$INSTALL_DIR/$LOCAL_NAME" || { printf "error: download failed\n" >&2; return 1; } + else + printf "error: curl or wget is required\n" >&2; return 1 + fi + + chmod +x "$INSTALL_DIR/$LOCAL_NAME" + + PATH_LINE="export PATH=\"\$HOME/.lakebox/bin:\$PATH\"" + case ":$PATH:" in + *":$INSTALL_DIR:"*) ;; + *) + added=0 + for rc in "$HOME/.zshrc" "$HOME/.bashrc"; do + [ -f "$rc" ] || continue + if ! 
grep -qF '.lakebox/bin' "$rc" 2>/dev/null; then
+          printf '\n# Lakebox CLI\n%s\n' "$PATH_LINE" >> "$rc"
+          printf "📝 Updated %s\n" "$rc"
+          added=1
+        fi
+      done
+      if [ "$added" = 0 ]; then
+        if [ "$OS" = "darwin" ]; then
+          rc="$HOME/.zshrc"
+        else
+          rc="$HOME/.bashrc"
+        fi
+        printf '\n# Lakebox CLI\n%s\n' "$PATH_LINE" >> "$rc"
+        printf "📝 Updated %s\n" "$rc"
+      fi
+      export PATH="$INSTALL_DIR:$PATH"
+      ;;
+  esac
+
+  printf "\n✅ Lakebox CLI installed to %s\n" "$INSTALL_DIR/$LOCAL_NAME"
+
+  LAKEBOX_HOST="https://dbsql-dev-testing-default.dev.databricks.com"
+  LAKEBOX_PROFILE="dbsql-dev-testing-default"
+  if ! grep -qF "[$LAKEBOX_PROFILE]" "$HOME/.databrickscfg" 2>/dev/null; then # match the [section] header; a bare substring can false-match
+    printf "\n🔑 Logging in...\n"
+    "$INSTALL_DIR/$LOCAL_NAME" auth login --host "$LAKEBOX_HOST" --profile "$LAKEBOX_PROFILE" # absolute path: PATH may not be refreshed in this shell
+  fi
+
+  printf "\nCommon workflows:\n"
+  printf "  lakebox ssh              # SSH to your default lakebox\n"
+  printf "  lakebox ssh my-project   # SSH to a named lakebox\n"
+  printf "  lakebox list             # list your lakeboxes\n"
+}
+
+_lakebox_install
+unset -f _lakebox_install
diff --git a/upload.sh b/upload.sh
new file mode 100755
index 0000000000..c55c0aa182
--- /dev/null
+++ b/upload.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -eu
+
+HOST="arca.ssh"
+FILES="install.sh databricks-darwin-amd64 databricks-darwin-arm64 databricks-linux-amd64 databricks-linux-arm64"
+
+for f in $FILES; do
+	printf "Uploading %s...\n" "$f"
+	scp "$f" "$HOST:~/"
+	ssh "$HOST" "~/unp-upload.sh ~/$f"
+done
+
+printf "\nDone.\n"