diff --git a/.DS_Store b/.DS_Store
index 32e7892..a8e6a54 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/NEWS.md b/NEWS.md
index 1f818a2..fdc219d 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,5 +1,8 @@
 # brickster 0.2.4
 
+* `open_workspace()` and the RStudio connections pane have been overhauled to
+  improve Unity Catalog browsing and to remove DBFS and WSFS browsing (#52)
+
 # brickster 0.2.3
 
 * Adding NEWS.md
diff --git a/R/clusters.R b/R/clusters.R
index ba6a21e..02aaf3a 100644
--- a/R/clusters.R
+++ b/R/clusters.R
@@ -825,6 +825,8 @@ get_and_start_cluster <- function(cluster_id, polling_interval = 5,
 #'
 #' @return Named list
 #' @export
+#'
+#' @importFrom rlang .data
 get_latest_dbr <- function(lts, ml, gpu, photon,
                            host = db_host(), token = db_token()) {
diff --git a/R/connection-pane.R b/R/connection-pane.R
index 8b35e67..52b425d 100644
--- a/R/connection-pane.R
+++ b/R/connection-pane.R
@@ -5,76 +5,10 @@
 # nocov start
 brickster_actions <- function(host) {
   list(
-    Workspace = list(
+    `Open Workspace` = list(
       icon = "",
       callback = function() {
-        utils::browseURL(host)
-      }
-    ),
-    SQL = list(
-      icon = "",
-      callback = function() {
-        utils::browseURL(paste0(host, "sql"))
-      }
-    ),
-    `Upload to DBFS` = list(
-      icon = "",
-      callback = function() {
-        path <- rstudioapi::selectFile(
-          caption = "Select file to upload to DBFS",
-          existing = TRUE
-        )
-        if (!is.null(path)) {
-          dbfs_path <- rstudioapi::showPrompt(
-            title = "File Destination (DBFS Path)",
-            message = "File Destination (DBFS Path):",
-            default = "/"
-          )
-        }
-        if (dbfs_path != "") {
-          db_dbfs_put(
-            path = dbfs_path,
-            file = path,
-            overwrite = TRUE
-          )
-        }
-      }
-    ),
-    `Workspace Import` = list(
-      icon = "",
-      callback = function() {
-        path <- rstudioapi::selectFile(
-          caption = "Select file to upload to DBFS",
-          existing = TRUE,
-          filter = "Databricks Permitted Files (*.R | *.r | *.py | *.scala | *.sql | *.dbc | *.html | *.ipynb)"
-        )
-        if (!is.null(path)) {
-          ws_path <- rstudioapi::showPrompt(
-            title = "Import Path (Workspace Path)",
-            message = "Workspace Destination:",
-            default = "/Shared/"
-          )
-        }
-        if (ws_path != "") {
-
-          filename <- base::basename(path)
-          ext <- gsub(".*\\.(.*)", "\\1", filename)
-
-          if (ext %in% c("R", "r", "py", "scala", "sql")) {
-            lang <- toupper(ext)
-            format <- "SOURCE"
-          } else {
-            lang <- NULL
-            format <- toupper(ifelse(ext == "ipynb", "jupyter", ext))
-          }
-          db_workspace_import(
-            file = path,
-            path = ws_path,
-            format = format,
-            language = lang,
-            overwrite = TRUE
-          )
-        }
+        utils::browseURL(glue::glue("https://{host}"))
       }
     )
   )
@@ -85,6 +19,10 @@ get_id_from_panel_name <- function(x) {
   sub(pattern = ".*\\((.*)\\).*", replacement = "\\1", x = x)
 }
 
+get_model_version_from_string <- function(x) {
+  as.integer(sub(pattern = "(\\d+).*", replacement = "\\1", x = x))
+}
+
 readable_time <- function(x) {
   time <- as.POSIXct(
     x = x/1000,
@@ -94,54 +32,6 @@
   as.character(time)
 }
 
-
-get_dbfs_items <- function(path = "/", host, token, is_file = FALSE) {
-  items <- db_dbfs_list(path = path, host = host, token = token)
-  if (is_file) {
-    data.frame(
-      name = c("file size", "modification time"),
-      type = c(
-        base::format(base::structure(items$file_size, class = "object_size"), units = "auto"),
-        readable_time(items$modification_time)
-      )
-    )
-  } else {
-    data.frame(
-      name = gsub(pattern = "^.*\\/(.*)$", replacement = "\\1", x = items$path),
-      type = ifelse(items$is_dir, "folder", "files")
-    )
-  }
-}
-
-#' @importFrom rlang .data
-get_notebook_items <- function(path = "/", host, token, is_nb = FALSE) {
-
-  items <- db_workspace_list(path = path, host = host, token = token)
-
-  if (is_nb) {
-    info <- data.frame(
-      name = c("language", "object id"),
-      type = c(tolower(items[[1]]$language), as.character(items[[1]]$object_id))
-    )
-  } else {
-    info <- purrr::map_dfr(items, function(x) {
-      list(
-        name = gsub(pattern = "^.*\\/(.*)$", replacement = "\\1", x = x$path),
-        type = x$object_type
-      )
-    })
-    if (nrow(info) > 0) {
-      info <- dplyr::filter(info, .data$type %in% c("DIRECTORY", "NOTEBOOK"))
-      info <- dplyr::mutate(info, type = dplyr::if_else(.data$type == "NOTEBOOK", "notebook", "folder"))
-    } else {
-      data.frame(name = NULL, type = NULL)
-    }
-  }
-
-  info
-
-}
-
 get_catalogs <- function(host, token) {
   catalogs <- db_uc_catalogs_list(host = host, token = token)
   data.frame(
@@ -191,6 +81,217 @@ get_tables <- function(catalog, schema, host, token) {
   }
 }
 
+get_uc_models <- function(catalog, schema, host, token) {
+  models <- db_uc_models_list(
+    catalog = catalog,
+    schema = schema,
+    host = host,
+    token = token
+  )
+  if (length(models) > 0) {
+    data.frame(
+      name = purrr::map_chr(models, "name"),
+      type = "model"
+    )
+  } else {
+    data.frame(name = NULL, type = NULL)
+  }
+}
+
+get_uc_model <- function(catalog, schema, model, host, token) {
+  model <- db_uc_models_get(
+    catalog = catalog,
+    schema = schema,
+    model = model,
+    host = host,
+    token = token
+  )
+  info <- list(
+    "name" = model$name,
+    "owner" = model$owner,
+    "created at" = readable_time(model$created_at),
+    "created by" = model$created_by,
+    "last updated" = readable_time(model$updated_at),
+    "updated by" = model$updated_by,
+    "id" = model$id
+  )
+
+  data.frame(
+    name = names(info),
+    type = unname(unlist(info))
+  )
+}
+
+get_uc_model_versions <- function(catalog, schema, model, host, token,
+                                  version = NULL) {
+
+  # if version is NULL get all, otherwise specific versions
+  versions <- db_uc_model_versions_get(
+    catalog,
+    schema,
+    model,
+    host = host,
+    token = token
+  )[[1]]
+
+  # get model info again to get the aliases
+  model_info <- db_uc_models_get(catalog, schema, model, host, token)
+
+  aliases <- purrr::map(
+    model_info$aliases, ~{
+      setNames(.x$version_num, .x$alias_name)
+    }) %>%
+    unlist()
+
+  version_names <- purrr::map_chr(versions, function(x) {
+    if (x$version %in% aliases) {
+      # keep only the aliases that point at this version
+      alias_values <- names(aliases[aliases %in% x$version])
+      alias_part <- paste0("@", alias_values, collapse = ", ")
+      paste0(x$version, " (", alias_part, ")")
+    } else {
+      as.character(x$version)
+    }
+  })
+
+  if (is.null(version)) {
+
+    res <- data.frame(
+      name = version_names,
+      type = "version"
+    )
+
+  } else {
+    version_meta <- versions[[which(purrr::map_vec(versions, "version") == version)]]
+    info <- list(
+      "created at" = readable_time(version_meta$created_at),
+      "created by" = version_meta$created_by,
+      "last updated" = readable_time(version_meta$updated_at),
+      "updated by" = version_meta$updated_by,
+      "run id" = version_meta$run_id,
+      "run workspace id" = version_meta$run_workspace_id,
+      "source" = version_meta$source,
+      "status" = version_meta$status,
+      "id" = version_meta$id
+    )
+
+    res <- data.frame(
+      name = names(info),
+      type = unname(unlist(info))
+    )
+
+  }
+
+  res
+
+}
+
+get_uc_functions <- function(catalog, schema, host, token) {
+  funcs <- db_uc_funcs_list(
+    catalog = catalog,
+    schema = schema,
+    host = host,
+    token = token
+  )
+  if (length(funcs) > 0) {
+    data.frame(
+      name = purrr::map_chr(funcs, "name"),
+      type = "func"
+    )
+  } else {
+    data.frame(name = NULL, type = NULL)
+ } +} + +get_uc_function <- function(catalog, schema, func, host, token) { + func <- db_uc_funcs_get( + catalog = catalog, + schema = schema, + func = func, + host = host, + token = token + ) + info <- list( + "name" = func$name, + "date type" = func$data_type, + "full data type" = func$full_data_type, + "created at" = readable_time(func$created_at), + "created by" = func$created_by, + "last updated" = readable_time(func$updated_at), + "updated by" = func$updated_by, + "id" = func$function_id + ) + + data.frame( + name = names(info), + type = unname(unlist(info)) + ) +} + +get_uc_volumes <- function(catalog, schema, host, token) { + volumes <- db_uc_volumes_list( + catalog = catalog, + schema = schema, + host = host, + token = token + ) + if (length(volumes) > 0) { + data.frame( + name = purrr::map_chr(volumes, "name"), + type = "volume" + ) + } else { + data.frame(name = NULL, type = NULL) + } +} + +get_uc_volume <- function(catalog, schema, host, volume, token) { + volumes <- db_uc_volumes_list( + catalog = catalog, + schema = schema, + host = host, + token = token + ) + + volume <- purrr::keep(volumes, ~.x$name == volume)[[1]] + + info <- list( + "name" = volume$name, + "volume type" = volume$volume_type, + "storage location" = volume$storage_location, + "created at" = readable_time(volume$created_at), + "created by" = volume$created_by, + "last updated" = readable_time(volume$updated_at), + "updated by" = volume$updated_by, + "id" = volume$volume_id + ) + + data.frame( + name = names(info), + type = unname(unlist(info)) + ) +} + +get_schema_objects <- function(catalog, schema, host, token) { + + objects <- list() + objects$tables <- get_tables(catalog, schema, host, token) + objects$volumes <- get_uc_volumes(catalog, schema, host, token) + objects$models <- get_uc_models(catalog, schema, host, token) + objects$funcs <- get_uc_functions(catalog, schema, host, token) + + # how many objects of each type exist + # only show when objects exist within + sizes <- purrr::map_int(objects, nrow) %>% + purrr::keep(~.x > 0) %>% + purrr::imap_chr(~ glue::glue("{.y} ({.x})")) + + data.frame( + name = unname(sizes), + type = names(sizes) + ) + +} + get_table_data <- function(catalog, schema, table, host, token, metadata = TRUE) { # if metadata is TRUE then return metadata, otherwise columns tbl <- db_uc_tables_get( @@ -225,7 +326,21 @@ get_table_data <- function(catalog, schema, table, host, token, metadata = TRUE) "updated at" = readable_time(tbl$updated_at), "updated by" = tbl$updated_by ) - } else { + } else if (tbl$data_source_format == "VECTOR_INDEX_FORMAT") { + info <- list( + "table type" = tbl$table_type, + "data source format" = tbl$data_source_format, + "full name" = tbl$full_name, + "owner" = tbl$owner, + "endpoint name" = tbl$properties$endpoint_name, + "endpoint type" = tbl$properties$endpoint_type, + "primary key" = tbl$properties$primary_key, + "created at" = readable_time(tbl$created_at), + "created by" = tbl$created_by, + "updated at" = readable_time(tbl$updated_at), + "updated by" = tbl$updated_by + ) + } else if (tbl$data_source_format == "TABLE") { info <- list( "table type" = tbl$table_type, "data source format" = tbl$data_source_format, @@ -240,6 +355,17 @@ get_table_data <- function(catalog, schema, table, host, token, metadata = TRUE) "min reader version" = tbl$properties$delta.minReaderVersion, "min writer version" = tbl$properties$delta.minWriterVersion ) + } else { + info <- list( + "table type" = tbl$table_type, + "data source format" = tbl$data_source_format, + "full 
+        "owner" = tbl$owner,
+        "created at" = readable_time(tbl$created_at),
+        "created by" = tbl$created_by,
+        "updated at" = readable_time(tbl$updated_at),
+        "updated by" = tbl$updated_by
+      )
     }
   } else {
     info <- purrr::map_chr(tbl$columns, function(x) {
@@ -434,16 +560,17 @@ get_warehouse <- function(id, host, token) {
 
 list_objects <- function(host, token,
                          type = NULL,
-                         dbfs = NULL,
-                         notebooks = NULL,
                          workspace = NULL,
-                         folder = NULL,
                          clusters = NULL,
                          warehouses = NULL,
                          metastore = NULL,
                          catalog = NULL,
                          schema = NULL,
+                         tables = NULL,
                          table = NULL,
+                         volumes = NULL,
+                         funcs = NULL,
+                         models = NULL,
                          modelregistry = NULL,
                          model = NULL,
                          versions = NULL,
@@ -454,33 +581,70 @@ list_objects <- function(host, token,
 
   # uc metastore
   if (!is.null(metastore)) {
-    if (!is.null(table)) {
-      objects <- data.frame(
-        name = c("metadata", "columns"),
-        type = c("metadata", "columns")
-      )
-      return(objects)
-    }
-    if (!is.null(schema)) {
-      objects <- get_tables(catalog = catalog, schema = schema, host = host, token = token)
+
+    if (!is.null(volumes)) {
+      objects <- get_uc_volumes(catalog, schema, host, token)
+      return(objects)
+    }
+
+    if (!is.null(funcs)) {
+      objects <- get_uc_functions(catalog, schema, host, token)
+      return(objects)
+    }
+
+    if (!is.null(models)) {
+
+      if (!is.null(versions)) {
+        objects <- get_uc_model_versions(catalog, schema, model, host, token)
+        return(objects)
+      }
+
+      if (!is.null(model)) {
+        objects <- data.frame(
+          name = c("metadata", "versions"),
+          type = c("metadata", "versions")
+        )
+        return(objects)
+      }
+
+      objects <- get_uc_models(catalog, schema, host, token)
+      return(objects)
+    }
+
+    if (!is.null(tables)) {
+
+      if (!is.null(table)) {
+        objects <- data.frame(
+          name = c("metadata", "columns"),
+          type = c("metadata", "columns")
+        )
+        return(objects)
+      }
+
+      objects <- get_tables(catalog, schema, host, token)
+      return(objects)
+    }
+
+    objects <- get_schema_objects(catalog, schema, host, token)
     return(objects)
   }
 
   if (!is.null(catalog)) {
-    objects <- get_schemas(catalog = catalog, host = host, token = token)
+    objects <- get_schemas(catalog, host, token)
     return(objects)
   }
 
   # catch all, return catalogs
-  objects <- get_catalogs(host = host, token = token)
+  objects <- get_catalogs(host, token)
   return(objects)
   }
 
   # experiments
   if (!is.null(experiments)) {
-    objects <- get_experiments(host = host, token = token)
+    objects <- get_experiments(host, token)
     return(objects)
   }
 
@@ -488,7 +652,7 @@ list_objects <- function(host, token,
   if (!is.null(modelregistry)) {
 
     if (!is.null(versions)) {
-      objects <- get_model_versions(id = model, host = host, token = token)
+      objects <- get_model_versions(id = model, host, token)
      return(objects)
    }
 
@@ -501,40 +665,20 @@ list_objects <- function(host, token,
     }
 
     # catch all to return models
-    objects <- get_models(host = host, token = token)
+    objects <- get_models(host, token)
     return(objects)
   }
 
   # clusters
   if (!is.null(clusters)) {
-    objects <- get_clusters(host = host, token = token)
+    objects <- get_clusters(host, token)
     return(objects)
   }
 
   # warehouses
   if (!is.null(warehouses)) {
-    objects <- get_warehouses(host = host, token = token)
-    return(objects)
-  }
-
-  # dbfs
-  if (!is.null(dbfs)) {
-    if (is.null(folder)) {
-      objects <- get_dbfs_items(path = "/", host = host, token = token)
-    } else {
-      objects <- get_dbfs_items(path = folder, host = host, token = token)
-    }
-    return(objects)
-  }
-
-  # workspace notebooks
-  if (!is.null(notebooks)) {
-    if (is.null(folder)) {
-      objects <- get_notebook_items(path = "/", host = host, token = token)
-    } else {
-      objects <- get_notebook_items(path = folder, host = host, token = token)
-    }
+    objects <- get_warehouses(host, token)
     return(objects)
   }
 
@@ -544,7 +688,7 @@ list_objects <- function(host, token,
   # check if sql endpoint fails
   sql_active <- tryCatch(
     expr = {
-      db_sql_warehouse_list(host = host, token = token)
+      db_sql_warehouse_list(host, token)
       TRUE
     },
     error = function(e) FALSE
@@ -553,7 +697,7 @@ list_objects <- function(host, token,
   # check if UC catalogs endpoint fails
   uc_active <- tryCatch(
     expr = {
-      db_uc_catalogs_list(host = host, token = token)
+      db_uc_catalogs_list(host, token)
       TRUE
     },
     error = function(e) FALSE
@@ -564,9 +708,7 @@ list_objects <- function(host, token,
     "Model Registry" = "modelregistry",
     "Experiments" = "experiments",
     "Clusters" = "clusters",
-    "SQL Warehouses" = "warehouses",
-    "File System (DBFS)" = "dbfs",
-    "Workspace (Notebooks)" = "notebooks"
+    "SQL Warehouses" = "warehouses"
   )
 
   if (!sql_active) {
@@ -601,34 +743,7 @@ list_columns <- function(host, token, path = "", ...) {
     return(info)
   }
 
-  # folders can be nested indefinitely, resolve folders into a path
-  if ("folder" %in% names(dots)) {
-    path <- paste0("/", dots[names(dots) == "folder"], collapse = "")
-  }
-
-  if (leaf_type == "folder") {
-    info <- get_dbfs_items(path = path, host = host, token = token)
-  }
-
-  if (leaf_type == "files") {
-    info <- get_dbfs_items(
-      path = paste0(path, "/", leaf),
-      host = host,
-      token = token,
-      is_file = TRUE
-    )
-  }
-
-  if (leaf_type == "notebook") {
-    info <- get_notebook_items(
-      paste0(path, "/", leaf),
-      host = host,
-      token = token,
-      is_nb = TRUE
-    )
-  }
-
-  if ("model" %in% names(dots)) {
+  if (!is.null(dots$modelregistry) && "model" %in% names(dots)) {
     if (leaf_type == "metadata") {
       info <- get_model_metadata(id = dots$model, host = host, token = token)
     } else if (leaf_type == "version") {
@@ -641,6 +756,27 @@ list_columns <- function(host, token, path = "", ...) {
     }
   }
 
+  if (!is.null(dots$catalog) && "model" %in% names(dots)) {
+    if (leaf_type == "metadata") {
+      info <- get_uc_model(
+        catalog = dots[["catalog"]],
+        schema = dots[["schema"]],
+        model = dots[["model"]],
+        host = host,
+        token = token
+      )
+    } else if (leaf_type == "version") {
+      info <- get_uc_model_versions(
+        catalog = dots[["catalog"]],
+        schema = dots[["schema"]],
+        model = dots[["model"]],
+        version = get_model_version_from_string(leaf$version),
+        host = host,
+        token = token
+      )
+    }
+  }
+
   if ("table" %in% names(dots)) {
     info <- get_table_data(
       catalog = dots$catalog,
@@ -652,6 +788,26 @@ list_columns <- function(host, token, path = "", ...) {
     )
   }
 
+  if (leaf_type == "func") {
+    info <- get_uc_function(
+      catalog = dots$catalog,
+      schema = dots$schema,
+      func = dots$func,
+      host = host,
+      token = token
+    )
+  }
+
+  if (leaf_type == "volume") {
+    info <- get_uc_volume(
+      catalog = dots$catalog,
+      schema = dots$schema,
+      volume = leaf$volume,
+      host = host,
+      token = token
+    )
+  }
+
   if (leaf_type == "experiment") {
     info <- get_experiment(
       id = leaf$experiment,
@@ -669,91 +825,74 @@ preview_object <- function(host, token, rowLimit, cluster = NULL,
                            warehouse = NULL,
                            files = NULL,
-                           notebook = NULL,
                            model = NULL,
                            version = NULL,
                            experiment = NULL,
                            catalog = NULL,
                            schema = NULL,
                            table = NULL,
+                           func = NULL,
+                           volume = NULL,
                            ...) {
 
-  # explore data
+  ws_id <- db_current_workspace_id()
+
   if (!is.null(catalog)) {
-    path <- paste0(c(catalog, schema, table), collapse = "/")
-    url <- glue::glue("{host}explore/data/{path}?o={db_wsid()}")
+
+    if (!is.null(catalog) && !is.null(schema) && !is.null(func)) {
+      path <- paste0(c("functions", catalog, schema, func), collapse = "/")
+    } else if (!is.null(catalog) && !is.null(schema) && !is.null(model) && !is.null(version)) {
+      version <- get_model_version_from_string(version)
+      path <- paste0(c("models", catalog, schema, model, "version", version), collapse = "/")
+    } else if (!is.null(catalog) && !is.null(schema) && !is.null(model)) {
+      path <- paste0(c("models", catalog, schema, model), collapse = "/")
+    } else if (!is.null(catalog) && !is.null(schema) && !is.null(volume)) {
+      path <- paste0(c("volumes", catalog, schema, volume), collapse = "/")
+    } else if (!is.null(catalog) && !is.null(schema) && !is.null(table)) {
+      path <- paste0(c(catalog, schema, table), collapse = "/")
+    } else if (!is.null(catalog) && !is.null(schema)) {
+      path <- paste0(c(catalog, schema), collapse = "/")
+    } else {
+      path <- catalog
+    }
+
+    url <- glue::glue("https://{host}/explore/data/{path}?o={ws_id}")
     return(utils::browseURL(url))
+
   }
 
   # version of model
   if (!is.null(version) && !is.null(model)) {
     version <- gsub("(\\d+) .*", "\\1", version)
-    url <- glue::glue("{host}?o={db_wsid()}#mlflow/models/{model}/versions/{version}")
+    url <- glue::glue("https://{host}/?o={ws_id}#mlflow/models/{model}/versions/{version}")
     return(utils::browseURL(url))
   }
 
   # model
   if (is.null(version) && !is.null(model)) {
-    url <- glue::glue("{host}?o={db_wsid()}#mlflow/models/{model}")
+    url <- glue::glue("https://{host}/ml/models/{model}?o={ws_id}")
     return(utils::browseURL(url))
   }
 
   # experiment
   if (!is.null(experiment)) {
     id <- get_id_from_panel_name(experiment)
-    url <- glue::glue("{host}?o={db_wsid()}#mlflow/experiments/{id}")
+    url <- glue::glue("https://{host}/?o={ws_id}#mlflow/experiments/{id}")
     return(utils::browseURL(url))
   }
 
   if (!is.null(cluster)) {
     id <- get_id_from_panel_name(cluster)
-    url <- glue::glue("{host}?o={db_wsid()}#setting/clusters/{id}/configuration")
+    url <- glue::glue("https://{host}/?o={ws_id}#setting/clusters/{id}/configuration")
     return(utils::browseURL(url))
   }
 
   if (!is.null(warehouse)) {
     id <- get_id_from_panel_name(warehouse)
-    url <- glue::glue("{host}sql/warehouses/{id}")
+    url <- glue::glue("https://{host}/sql/warehouses/{id}")
     return(utils::browseURL(url))
   }
 
-  # folders can be nested indefinitely
-  dots <- list(...)
-  if ("folder" %in% names(dots)) {
-    path <- paste0("/", dots[names(dots) == "folder"], collapse = "")
-  }
-
-  if (!is.null(notebook)) {
-    # export notebook as ipynb
-    content <- db_workspace_export(
-      path = paste0(path, "/", notebook),
-      format = "JUPYTER"
-    )
-
-    # save to temporary directory and open
-    dir <- tempdir()
-    content_text <- rawToChar(base64enc::base64decode(content$content))
-    nb_path <- file.path(dir, paste(notebook, content$file_type, sep = "."))
-    rmd_path <- file.path(dir, paste(notebook, "rmd", sep = "."))
-    base::writeLines(content_text, con = nb_path)
-    rmarkdown::convert_ipynb(input = nb_path, output = rmd_path)
-    rstudioapi::navigateToFile(file = rmd_path)
-
-  }
-
-  if (!is.null(files)) {
-    # TODO: check file size first, don't download if >10mb
-    # download from dbfs
-    content <- db_dbfs_read(path = paste0(path, "/", files))
-
-    # save to temporary directory and open
-    dir <- tempdir()
-    content_text <- rawToChar(base64enc::base64decode(content$data))
-    file_path <- file.path(dir, files)
-    base::writeLines(content_text, con = file_path)
-    rstudioapi::navigateToFile(file = file_path)
-  }
-
 }
 
 #' Connect to Databricks Workspace
@@ -795,31 +934,27 @@ open_workspace <- function(host = db_host(), token = db_token(), name = NULL) {
       dots <- list(...)
 
-      # folders can be nested indefinitely
-      if ("folder" %in% names(dots)) {
-        path <- paste0("/", dots[names(dots) == "folder"], collapse = "")
-      } else {
-        path <- "/"
-      }
-
       objects <- list_objects(
         host, token,
-        folder = path,
-        files = dots$files,
-        warehouses = dots$warehouses,
-        clusters = dots$clusters,
-        dbfs = dots$dbfs,
-        notebooks = dots$notebooks,
-        metastore = dots$metastore,
-        catalog = dots$catalog,
-        schema = dots$schema,
-        table = dots$table,
-        columns = dots$columns,
-        experiments = dots$experiments,
-        modelregistry = dots$modelregistry,
+        files = dots[["files"]],
+        warehouses = dots[["warehouses"]],
+        clusters = dots[["clusters"]],
+        metastore = dots[["metastore"]],
+        catalog = dots[["catalog"]],
+        schema = dots[["schema"]],
+        table = dots[["table"]],
+        tables = dots[["tables"]],
+        volume = dots[["volume"]],
+        volumes = dots[["volumes"]],
+        models = dots[["models"]],
+        func = dots[["func"]],
+        funcs = dots[["funcs"]],
+        columns = dots[["columns"]],
+        experiments = dots[["experiments"]],
+        modelregistry = dots[["modelregistry"]],
         model = dots[["model"]],
-        versions = dots$versions
+        versions = dots[["versions"]]
      )
      return(objects)
    },
@@ -858,62 +993,93 @@ close_workspace <- function(host = db_host()) {
 
 list_objects_types <- function() {
   list(
     workspace = list(contains = list(
-      clusters = list(contains = list(
+      clusters = list(
+        icon = system.file("icons", "compute.png", package = "brickster"),
+        contains = list(
         cluster = list(
-          icon = system.file("icons", "magnify.png", package = "brickster"),
+          icon = system.file("icons", "open.png", package = "brickster"),
          contains = "data"
        )
      )),
-      metastore = list(contains = list(
-        catalog = list(contains = list(
-          schema = list(contains = list(
-            table = list(contains = list(
-              metadata = list(contains = "data"),
-              columns = list(contains = "data")
+      metastore = list(
+        icon = system.file("icons", "metastore.png", package = "brickster"),
+        contains = list(
+        catalog = list(
+          icon = system.file("icons", "catalog.png", package = "brickster"),
+          contains = list(
+          schema = list(
+            icon = system.file("icons", "schema.png", package = "brickster"),
+            contains = list(
+            tables = list(contains = list(
+              table = list(
+                icon = system.file("icons", "table.png", package = "brickster"),
+                contains = list(
+                  metadata = list(
+                    icon = system.file("icons", "open.png", package = "brickster"),
+                    contains = "data"
+                  ),
+                  columns = list(
+                    contains = "data"
+                  )
+                ))
+            )),
+            volumes = list(contains = list(
+              volume = list(
+                icon = system.file("icons", "volume.png", package = "brickster"),
+                contains = "data"
+              )
+            )),
+            models = list(
+              contains = list(
+              model = list(
+                icon = system.file("icons", "model.png", package = "brickster"),
+                contains = list(
+                  metadata = list(contains = "data"),
+                  versions = list(contains = list(
+                    version = list(
+                      icon = system.file("icons", "open.png", package = "brickster"),
+                      contains = "data"
+                    )
+                  ))
+                ))
+            )),
+            funcs = list(contains = list(
+              func = list(
+                icon = system.file("icons", "func.png", package = "brickster"),
+                contains = "data"
+              )
+            ))
            ))
          ))
        )),
-      experiments = list(contains = list(
+      experiments = list(
+        icon = system.file("icons", "exp.png", package = "brickster"),
+        contains = list(
         experiment = list(
-          icon = system.file("icons", "microscope.png", package = "brickster"),
+          icon = system.file("icons", "open.png", package = "brickster"),
          contains = "data"
        )
      )),
-      modelregistry = list(contains = list(
-        model = list(
-          icon = system.file("icons", "abacus.png", package = "brickster"),
-          contains = list(
+      modelregistry = list(
+        icon = system.file("icons", "model.png", package = "brickster"),
+        contains = list(
+        model = list(contains = list(
           metadata = list(contains = "data"),
          versions = list(contains = list(
            version = list(
-              icon = system.file("icons", "package.png", package = "brickster"),
+              icon = system.file("icons", "open.png", package = "brickster"),
              contains = "data"
            )
          ))
        ))
      )),
-      warehouses = list(contains = list(
+      warehouses = list(
+        icon = system.file("icons", "warehouse.png", package = "brickster"),
+        contains = list(
         warehouse = list(
-          icon = system.file("icons", "magnify.png", package = "brickster"),
+          icon = system.file("icons", "open.png", package = "brickster"),
          contains = "data"
        )
-      )),
-      dbfs = list(contains = list(
-        folder = list(contains = list(
-          files = list(
-            icon = system.file("icons", "file.png", package = "brickster"),
-            contains = "data"
-          )
-        ))
-      )),
-      notebooks = list(contains = list(
-        folder = list(contains = list(
-          notebook = list(
-            icon = system.file("icons", "notebook.png", package = "brickster"),
-            contains = "data"
-          )
-        ))
      ))
    ))
  )
diff --git a/R/knitr-engine-helpers.R b/R/knitr-engine-helpers.R
index 6546a52..ca4f64c 100644
--- a/R/knitr-engine-helpers.R
+++ b/R/knitr-engine-helpers.R
@@ -21,12 +21,12 @@ setup_databricks_rmd <- function(cluster_id) {
   # credentials should not be used in rmarkdown content as plain text
 
   # get and start cluster
-  brickster::get_and_start_cluster(
+  get_and_start_cluster(
     cluster_id = cluster_id
   )
 
   # create execution context
-  exec_context <- brickster::db_context_create(
+  exec_context <- db_context_create(
     cluster_id = cluster_id,
     language = "r"
   )
diff --git a/R/sql-connector.R b/R/sql-connector.R
index f5d91a7..7614101 100644
--- a/R/sql-connector.R
+++ b/R/sql-connector.R
@@ -63,7 +63,7 @@ db_sql_client <- function(id,
                           use_cloud_fetch = FALSE,
                           session_configuration = list(),
                           host = db_host(), token = db_token(),
-                          workspace_id = db_wsid(),
+                          workspace_id = db_current_workspace_id(),
                           ...) {
 
   compute_type <- match.arg(compute_type)
@@ -329,7 +329,7 @@ DatabricksSqlClient <- R6::R6Class(
 )
 
 generate_http_path <- function(id, is_warehouse = TRUE,
-                               workspace_id = db_wsid()) {
+                               workspace_id = db_current_workspace_id()) {
   if (is_warehouse) {
     paste0("/sql/1.0/warehouses/", id)
   } else {
diff --git a/R/unity-catalog.R b/R/unity-catalog.R
index 071caeb..c1e9224 100644
--- a/R/unity-catalog.R
+++ b/R/unity-catalog.R
@@ -225,6 +225,143 @@ db_uc_tables_get <- function(catalog, schema, table,
   }
 }
 
+db_uc_models_list <- function(catalog, schema,
+                              host = db_host(), token = db_token(),
+                              perform_request = TRUE) {
+
+  req <- db_request(
+    endpoint = "unity-catalog/models",
+    method = "GET",
+    version = "2.1",
+    host = host,
+    token = token
+  ) %>%
+    httr2::req_url_query(
+      catalog_name = catalog,
+      schema_name = schema,
+      include_browse = 'true'
+    )
+
+  if (perform_request) {
+    db_perform_request(req)$registered_models
+  } else {
+    req
+  }
+}
+
+
+db_uc_models_get <- function(catalog, schema, model,
+                             host = db_host(), token = db_token(),
+                             perform_request = TRUE) {
+
+  req <- db_request(
+    endpoint = "unity-catalog/models",
+    method = "GET",
+    version = "2.1",
+    host = host,
+    token = token
+  ) %>%
+    httr2::req_url_path_append(paste(catalog, schema, model, sep = ".")) %>%
+    httr2::req_url_query(include_aliases = 'true')
+
+  if (perform_request) {
+    db_perform_request(req)
+  } else {
+    req
+  }
+}
+
+
+db_uc_model_versions_get <- function(catalog, schema, model,
+                                     host = db_host(), token = db_token(),
+                                     perform_request = TRUE) {
+
+  req <- db_request(
+    endpoint = "unity-catalog/models",
+    method = "GET",
+    version = "2.1",
+    host = host,
+    token = token
+  ) %>%
+    httr2::req_url_path_append(paste(catalog, schema, model, sep = ".")) %>%
+    httr2::req_url_path_append("versions") %>%
+    httr2::req_url_query(max_results = 1000)
+
+  if (perform_request) {
+    db_perform_request(req)
+  } else {
+    req
+  }
+}
+
+db_uc_funcs_list <- function(catalog, schema,
+                             host = db_host(), token = db_token(),
+                             perform_request = TRUE) {
+
+  req <- db_request(
+    endpoint = "unity-catalog/functions",
+    method = "GET",
+    version = "2.1",
+    host = host,
+    token = token
+  ) %>%
+    httr2::req_url_query(
+      catalog_name = catalog,
+      schema_name = schema
+    )
+
+  if (perform_request) {
+    db_perform_request(req)$functions
+  } else {
+    req
+  }
+}
+
+
+db_uc_funcs_get <- function(catalog, schema, func,
+                            host = db_host(), token = db_token(),
+                            perform_request = TRUE) {
+
+  req <- db_request(
+    endpoint = "unity-catalog/functions",
+    method = "GET",
+    version = "2.1",
+    host = host,
+    token = token
+  ) %>%
+    httr2::req_url_path_append(paste(catalog, schema, func, sep = "."))
+
+  if (perform_request) {
+    db_perform_request(req)
+  } else {
+    req
+  }
+}
+
+db_uc_volumes_list <- function(catalog, schema,
+                               host = db_host(), token = db_token(),
+                               perform_request = TRUE) {
+
+  req <- db_request(
+    endpoint = "unity-catalog/volumes",
+    method = "GET",
+    version = "2.1",
+    host = host,
+    token = token
+  ) %>%
+    httr2::req_url_query(
+      catalog_name = catalog,
+      schema_name = schema
+    )
+
+  if (perform_request) {
+    db_perform_request(req)$volumes
+  } else {
+    req
+  }
+}
+
+
 db_uc_table_summaries <- function(catalog,
                                   schema_name_pattern = NULL,
                                   table_name_pattern = NULL,
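All of the new `db_uc_*` helpers above follow the package's request-builder convention: passing `perform_request = FALSE` returns the prepared `httr2` request instead of sending it, which is also how the new unit tests later in this changeset exercise these endpoints without a live workspace. A short sketch of that pattern (the catalog and schema names are placeholders):

library(brickster)

# build, but do not send, the list-models request
req <- db_uc_models_list(
  catalog = "main",    # placeholder catalog name
  schema = "default",  # placeholder schema name
  perform_request = FALSE
)

# inspect the method, URL, and query string before performing the call
print(req)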
diff --git a/R/vector-search.R b/R/vector-search.R
index d8be776..5c92239 100644
--- a/R/vector-search.R
+++ b/R/vector-search.R
@@ -201,7 +201,7 @@ db_vs_indexes_get <- function(index,
 #' @param name Name of vector search index
 #' @param endpoint Name of vector search endpoint
 #' @param primary_key Vector search primary key column name
-#' @param spec
+#' @param spec Either [delta_sync_index_spec()] or [direct_access_index_spec()].
 #'
 #' @inheritParams auth_params
 #' @inheritParams db_sql_warehouse_create
diff --git a/inst/icons/.DS_Store b/inst/icons/.DS_Store
index 5008ddf..b22ed77 100644
Binary files a/inst/icons/.DS_Store and b/inst/icons/.DS_Store differ
diff --git a/inst/icons/abacus.png b/inst/icons/abacus.png
deleted file mode 100644
index e340e1a..0000000
Binary files a/inst/icons/abacus.png and /dev/null differ
diff --git a/inst/icons/blank.png b/inst/icons/blank.png
new file mode 100644
index 0000000..17b76bf
Binary files /dev/null and b/inst/icons/blank.png differ
diff --git a/inst/icons/cardbox.png b/inst/icons/cardbox.png
deleted file mode 100644
index 96acc61..0000000
Binary files a/inst/icons/cardbox.png and /dev/null differ
diff --git a/inst/icons/catalog.png b/inst/icons/catalog.png
new file mode 100644
index 0000000..e66f4f8
Binary files /dev/null and b/inst/icons/catalog.png differ
diff --git a/inst/icons/compute.png b/inst/icons/compute.png
new file mode 100644
index 0000000..5e26f9f
Binary files /dev/null and b/inst/icons/compute.png differ
diff --git a/inst/icons/exp.png b/inst/icons/exp.png
new file mode 100644
index 0000000..3913784
Binary files /dev/null and b/inst/icons/exp.png differ
diff --git a/inst/icons/file.png b/inst/icons/file.png
index 682d0e1..621ac52 100644
Binary files a/inst/icons/file.png and b/inst/icons/file.png differ
diff --git a/inst/icons/func.png b/inst/icons/func.png
new file mode 100644
index 0000000..f456b1b
Binary files /dev/null and b/inst/icons/func.png differ
diff --git a/inst/icons/magnify.png b/inst/icons/magnify.png
deleted file mode 100644
index dc60690..0000000
Binary files a/inst/icons/magnify.png and /dev/null differ
diff --git a/inst/icons/metastore.png b/inst/icons/metastore.png
new file mode 100644
index 0000000..baa7274
Binary files /dev/null and b/inst/icons/metastore.png differ
diff --git a/inst/icons/microscope.png b/inst/icons/microscope.png
deleted file mode 100644
index 401f614..0000000
Binary files a/inst/icons/microscope.png and /dev/null differ
diff --git a/inst/icons/model.png b/inst/icons/model.png
new file mode 100644
index 0000000..9747126
Binary files /dev/null and b/inst/icons/model.png differ
diff --git a/inst/icons/notebook.png b/inst/icons/notebook.png
index 7b91e29..f6f3977 100644
Binary files a/inst/icons/notebook.png and b/inst/icons/notebook.png differ
diff --git a/inst/icons/open.png b/inst/icons/open.png
new file mode 100644
index 0000000..0abc383
Binary files /dev/null and b/inst/icons/open.png differ
diff --git a/inst/icons/package.png b/inst/icons/package.png
deleted file mode 100644
index 2b80fc9..0000000
Binary files a/inst/icons/package.png and /dev/null differ
diff --git a/inst/icons/schema.png b/inst/icons/schema.png
new file mode 100644
index 0000000..55e1419
Binary files /dev/null and b/inst/icons/schema.png differ
diff --git a/inst/icons/settings.png b/inst/icons/settings.png
new file mode 100644
index 0000000..e051a64
Binary files /dev/null and b/inst/icons/settings.png differ
diff --git a/inst/icons/table.png b/inst/icons/table.png
new file mode 100644
index 0000000..08bd32b
Binary files /dev/null and b/inst/icons/table.png differ
diff --git a/inst/icons/volume.png b/inst/icons/volume.png
new file mode 100644
index 0000000..7a7189f
Binary files /dev/null and b/inst/icons/volume.png differ
diff --git a/inst/icons/warehouse.png b/inst/icons/warehouse.png
new file mode 100644
index 0000000..66be6bc
Binary files /dev/null and b/inst/icons/warehouse.png differ
diff --git a/inst/icons/workspace.png b/inst/icons/workspace.png
new file mode 100644
index 0000000..f6f3977
Binary files /dev/null and b/inst/icons/workspace.png differ
diff --git a/inst/rmarkdown/templates/databricks-remote-notebook/skeleton/skeleton.Rmd b/inst/rmarkdown/templates/databricks-remote-notebook/skeleton/skeleton.Rmd
index bd01790..f0c28b3 100644
--- a/inst/rmarkdown/templates/databricks-remote-notebook/skeleton/skeleton.Rmd
+++ b/inst/rmarkdown/templates/databricks-remote-notebook/skeleton/skeleton.Rmd
@@ -5,16 +5,10 @@ date: "The Date"
 output: output_format
 ---
 
-Ensure that you have correctly setup `.Renviron` with:
-
-- `DATABRICKS_HOST`
-
-- `DATABRICKS_TOKEN`
+Ensure that you have correctly set up `.Renviron` with `DATABRICKS_HOST`.
 
 ```{r setup, include=FALSE}
 library(brickster)
-
-# you can use Addins -> `Databricks Compute` to get and insert cluster ID
 brickster::setup_databricks_rmd(cluster_id = "")
 ```
diff --git a/man/db_sql_client.Rd b/man/db_sql_client.Rd
index 7adfcf2..3d48ccd 100644
--- a/man/db_sql_client.Rd
+++ b/man/db_sql_client.Rd
@@ -13,7 +13,7 @@ db_sql_client(
   session_configuration = list(),
   host = db_host(),
   token = db_token(),
-  workspace_id = db_wsid(),
+  workspace_id = db_current_workspace_id(),
   ...
 )
 }
diff --git a/man/db_vs_indexes_create.Rd b/man/db_vs_indexes_create.Rd
index 1e1d05b..35fff1b 100644
--- a/man/db_vs_indexes_create.Rd
+++ b/man/db_vs_indexes_create.Rd
@@ -19,9 +19,9 @@ db_vs_indexes_create(
 
 \item{endpoint}{Name of vector search endpoint}
 
-\item{primary_key}{Name of vector search index}
+\item{primary_key}{Vector search primary key column name}
 
-\item{spec}{Name of vector search index}
+\item{spec}{Either \code{\link[=delta_sync_index_spec]{delta_sync_index_spec()}} or \code{\link[=direct_access_index_spec]{direct_access_index_spec()}}.}
 
 \item{host}{Databricks workspace URL, defaults to calling \code{\link[=db_host]{db_host()}}.}
 
diff --git a/tests/testthat/test-connection-pane.R b/tests/testthat/test-connection-pane.R
index 02299ae..ca1c7c5 100644
--- a/tests/testthat/test-connection-pane.R
+++ b/tests/testthat/test-connection-pane.R
@@ -19,16 +19,6 @@ test_that("Connection Pane Helpers", {
   expect_identical(readable_time(1713146793000), "2024-04-15 02:06:33")
   expect_error(readable_time("1713146793000"))
 
-  expect_no_error({
-    dbfs_items <- get_dbfs_items(host = db_host(), token = db_token())
-  })
-  expect_type(dbfs_items, "list")
-
-  expect_no_error({
-    nb_items <- get_notebook_items(host = db_host(), token = db_token())
-  })
-  expect_type(dbfs_items, "list")
-
   expect_no_error({
     catalog_items <- get_catalogs(host = db_host(), token = db_token())
   })
@@ -185,6 +175,7 @@ test_that("Connection Pane Helpers", {
     metastore = "some_metastore",
     catalog = "system",
     schema = "information_schema",
+    tables = "some_tables",
     table = "catalogs",
     host = db_host(),
     token = db_token()
diff --git a/tests/testthat/test-data-structures.R b/tests/testthat/test-data-structures.R
index 4f5ed47..46f1178 100644
--- a/tests/testthat/test-data-structures.R
+++ b/tests/testthat/test-data-structures.R
@@ -2,26 +2,26 @@ test_that("job tasks object behaviour", {
 
   # we don't currently test if inputs to `job_task()` are of the correct type
   # therefore these tests for basic behaviour
-  mock_task_a <- brickster::job_task(
+  mock_task_a <- job_task(
     task_key = "mock_task_a",
     existing_cluster_id = "mock_cluster",
-    task = brickster::notebook_task(notebook_path = "MockNotebook")
+    task = notebook_task(notebook_path = "MockNotebook")
   )
 
-  mock_task_b <- brickster::job_task(
+  mock_task_b <- job_task(
     task_key = "mock_task_b",
     existing_cluster_id = "mock_cluster",
-    task = brickster::spark_jar_task(main_class_name = "MockClass"),
+    task = spark_jar_task(main_class_name = "MockClass"),
     depends_on = c("mock_task_a")
   )
 
   expect_s3_class(mock_task_a, c("JobTaskSettings", "list"))
   expect_s3_class(mock_task_b, c("JobTaskSettings", "list"))
-  expect_true(brickster::is.job_task(mock_task_a))
-  expect_true(brickster::is.job_task(mock_task_b))
+  expect_true(is.job_task(mock_task_a))
+  expect_true(is.job_task(mock_task_b))
 
   expect_error(
-    brickster::job_task(
+    job_task(
       task_key = "mock_task_b",
       existing_cluster_id = "mock_cluster",
       task = "MockTask",
@@ -30,99 +30,99 @@ test_that("job tasks object behaviour", {
     )
   )
 
   expect_s3_class(
-    brickster::job_tasks(mock_task_a, mock_task_b),
+    job_tasks(mock_task_a, mock_task_b),
     c("JobTasks", "list")
   )
-  expect_error(brickster::job_tasks(mock_task_a, list()))
-  expect_error(brickster::job_tasks())
+  expect_error(job_tasks(mock_task_a, list()))
+  expect_error(job_tasks())
 
 })
 
 test_that("task object behaviour", {
 
   # notebook task
-  nb_task <- brickster::notebook_task(notebook_path = "MockNotebook")
+  nb_task <- notebook_task(notebook_path = "MockNotebook")
   expect_s3_class(nb_task, c("NotebookTask", "JobTask"))
-  expect_true(brickster::is.notebook_task(nb_task))
-  expect_true(brickster::is.valid_task_type(nb_task))
+  expect_true(is.notebook_task(nb_task))
+  expect_true(is.valid_task_type(nb_task))
 
   # spark jar task
-  sj_task <- brickster::spark_jar_task(main_class_name = "MockClass")
+  sj_task <- spark_jar_task(main_class_name = "MockClass")
   expect_s3_class(sj_task, c("SparkJarTask", "JobTask"))
-  expect_true(brickster::is.spark_jar_task(sj_task))
-  expect_true(brickster::is.valid_task_type(sj_task))
+  expect_true(is.spark_jar_task(sj_task))
+  expect_true(is.valid_task_type(sj_task))
 
   # spark python task
-  sp_task <- brickster::spark_python_task(python_file = "MockPythonScript")
+  sp_task <- spark_python_task(python_file = "MockPythonScript")
   expect_s3_class(sp_task, c("SparkPythonTask", "JobTask"))
-  expect_true(brickster::is.spark_python_task(sp_task))
-  expect_true(brickster::is.valid_task_type(sp_task))
+  expect_true(is.spark_python_task(sp_task))
+  expect_true(is.valid_task_type(sp_task))
 
   # spark submit task
-  ss_task <- brickster::spark_submit_task(parameters = list(a = "A", b = "B"))
+  ss_task <- spark_submit_task(parameters = list(a = "A", b = "B"))
   expect_s3_class(ss_task, c("SparkSubmitTask", "JobTask"))
-  expect_true(brickster::is.spark_submit_task(ss_task))
-  expect_true(brickster::is.valid_task_type(ss_task))
+  expect_true(is.spark_submit_task(ss_task))
+  expect_true(is.valid_task_type(ss_task))
 
   # pipeline task
-  pl_task <- brickster::pipeline_task(pipeline_id = "MockPipelineId")
+  pl_task <- pipeline_task(pipeline_id = "MockPipelineId")
   expect_s3_class(pl_task, c("PipelineTask", "JobTask"))
-  expect_true(brickster::is.pipeline_task(pl_task))
-  expect_true(brickster::is.valid_task_type(pl_task))
+  expect_true(is.pipeline_task(pl_task))
+  expect_true(is.valid_task_type(pl_task))
 
   # python wheel task
-  pw_task <- brickster::python_wheel_task(package_name = "MockPythonWheel")
+  pw_task <- python_wheel_task(package_name = "MockPythonWheel")
   expect_s3_class(pw_task, c("PythonWheelTask", "JobTask"))
-  expect_true(brickster::is.python_wheel_task(pw_task))
-  expect_true(brickster::is.valid_task_type(pw_task))
+  expect_true(is.python_wheel_task(pw_task))
+  expect_true(is.valid_task_type(pw_task))
 
 })
 
 test_that("library object behaviour", {
 
   # jar
-  jar <- brickster::lib_jar(jar = "MockJar.jar")
+  jar <- lib_jar(jar = "MockJar.jar")
   expect_s3_class(jar, c("JarLibrary", "Library"))
-  expect_true(brickster::is.lib_jar(jar))
-  expect_true(brickster::is.library(jar))
+  expect_true(is.lib_jar(jar))
+  expect_true(is.library(jar))
 
   # egg
-  egg <- brickster::lib_egg(egg = "s3://mock-bucket/MockEgg")
+  egg <- lib_egg(egg = "s3://mock-bucket/MockEgg")
   expect_s3_class(egg, c("EggLibrary", "Library"))
-  expect_true(brickster::is.lib_egg(egg))
-  expect_true(brickster::is.library(egg))
+  expect_true(is.lib_egg(egg))
+  expect_true(is.library(egg))
 
   # wheel
-  whl <- brickster::lib_whl(whl = "s3://mock-bucket/MockWheel")
+  whl <- lib_whl(whl = "s3://mock-bucket/MockWheel")
   expect_s3_class(whl, c("WhlLibrary", "Library"))
-  expect_true(brickster::is.lib_whl(whl))
-  expect_true(brickster::is.library(whl))
+  expect_true(is.lib_whl(whl))
+  expect_true(is.library(whl))
 
   # PyPI
-  pypi <- brickster::lib_pypi(package = "MockPackage")
+  pypi <- lib_pypi(package = "MockPackage")
   expect_s3_class(pypi, c("PyPiLibrary", "Library"))
-  expect_true(brickster::is.lib_pypi(pypi))
-  expect_true(brickster::is.library(pypi))
+  expect_true(is.lib_pypi(pypi))
+  expect_true(is.library(pypi))
 
   # maven
-  maven <- brickster::lib_maven(coordinates = "org.Mock.Package:0.0.1")
+  maven <- lib_maven(coordinates = "org.Mock.Package:0.0.1")
   expect_s3_class(maven, c("MavenLibrary", "Library"))
-  expect_true(brickster::is.lib_maven(maven))
-  expect_true(brickster::is.library(maven))
+  expect_true(is.lib_maven(maven))
+  expect_true(is.library(maven))
 
   # cran
-  cran <- brickster::lib_cran(package = "brickster")
+  cran <- lib_cran(package = "brickster")
   expect_s3_class(cran, c("CranLibrary", "Library"))
-  expect_true(brickster::is.lib_cran(cran))
-  expect_true(brickster::is.library(cran))
+  expect_true(is.lib_cran(cran))
+  expect_true(is.library(cran))
 
   # libraries object
-  libs <- brickster::libraries(jar, egg, whl, pypi, maven, cran)
+  libs <- libraries(jar, egg, whl, pypi, maven, cran)
   expect_s3_class(libs, c("Libraries", "list"))
-  expect_true(brickster::is.libraries(libs))
-  expect_error(brickster::libraries(123))
-  expect_error(brickster::libraries("MockLibrary"))
-  expect_error(brickster::libraries("MockLibrary", jar, cran))
+  expect_true(is.libraries(libs))
+  expect_error(libraries(123))
+  expect_error(libraries("MockLibrary"))
+  expect_error(libraries("MockLibrary", jar, cran))
 
 })
 
@@ -133,13 +133,13 @@ test_that("access control object behaviour", {
   valid_permissions <- c("CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER")
   for (perm in valid_permissions) {
     expect_s3_class(
-      cont_req_user <- brickster::access_control_req_user(
+      cont_req_user <- access_control_req_user(
         user_name = "user@mock.com",
         permission_level = perm
       ),
       class = c("AccessControlRequestForUser", "list")
    )
-    expect_true(brickster::is.access_control_req_user(cont_req_user))
+    expect_true(is.access_control_req_user(cont_req_user))
   }
 
   # test invalid permissions raise errors
@@ -150,7 +150,7 @@ test_that("access control object behaviour", {
   )
   for (perm in invalid_permissions) {
     expect_error(
-      brickster::access_control_req_user(
+      access_control_req_user(
         user_name = "user@mock.com",
         permission_level = perm
       )
@@ -162,13 +162,13 @@ test_that("access control object behaviour", {
   valid_permissions <- c("CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW")
   for (perm in valid_permissions) {
     expect_s3_class(
-      cont_req_grp <- brickster::access_control_req_group(
+      cont_req_grp <- access_control_req_group(
        group = "MockGroup",
        permission_level = perm
      ),
      class = c("AccessControlRequestForUser", "list")
    )
-    expect_true(brickster::is.access_control_req_group(cont_req_grp))
+    expect_true(is.access_control_req_group(cont_req_grp))
   }
 
   # test invalid permissions raise errors
@@ -179,7 +179,7 @@ test_that("access control object behaviour", {
   )
   for (perm in invalid_permissions) {
     expect_error(
-      brickster::access_control_req_group(
+      access_control_req_group(
         group = "MockGroup",
         permission_level = perm
       )
@@ -187,33 +187,33 @@ test_that("access control object behaviour", {
   }
 
   # access control request
-  group_perm <- brickster::access_control_req_group(
+  group_perm <- access_control_req_group(
     group = "MockGroup",
     permission_level = "CAN_VIEW"
   )
-  user_perm <- brickster::access_control_req_user(
+  user_perm <- access_control_req_user(
     user_name = "user@mock.com",
     permission_level = "IS_OWNER"
   )
   expect_s3_class(
-    brickster::access_control_request(group_perm),
+    access_control_request(group_perm),
     c("AccessControlRequest", "list")
   )
   expect_s3_class(
-    brickster::access_control_request(user_perm),
+    access_control_request(user_perm),
     c("AccessControlRequest", "list")
   )
   expect_s3_class(
-    brickster::access_control_request(group_perm, user_perm),
+    access_control_request(group_perm, user_perm),
     c("AccessControlRequest", "list")
   )
   expect_length(
-    brickster::access_control_request(group_perm, user_perm),
+    access_control_request(group_perm, user_perm),
     2L
   )
   expect_true(
     is.access_control_request(
-      brickster::access_control_request(group_perm, user_perm)
+      access_control_request(group_perm, user_perm)
     )
   )
 
@@ -227,20 +227,20 @@ test_that("cron object behaviour", {
   valid_status <- c("UNPAUSED", "PAUSED")
   for (status in valid_status) {
     expect_s3_class(
-      cron <- brickster::cron_schedule(
+      cron <- cron_schedule(
         quartz_cron_expression = "* * * 5 *",
         timezone_id = "Etc/UTC",
         pause_status = status
       ),
       c("CronSchedule", "list")
     )
-    expect_true(brickster::is.cron_schedule(cron))
+    expect_true(is.cron_schedule(cron))
   }
 
   invalid_status <- c("paused", "active", 123L, 123, character(0))
   for (status in invalid_status) {
     expect_error(
-      brickster::cron_schedule(
+      cron_schedule(
         quartz_cron_expression = "* * * 5 *",
         timezone_id = "Etc/UTC",
         pause_status = status
@@ -259,7 +259,7 @@ test_that("email notification object behaviour", {
 
   for (input in not_allowed_on) {
     expect_error(
-      brickster::email_notifications(
+      email_notifications(
         on_start = not_allowed_on,
         on_success = not_allowed_on,
         on_failure = not_allowed_on
@@ -269,7 +269,7 @@ test_that("email notification object behaviour", {
 
   for (input in not_allowed_alert) {
     expect_error(
-      brickster::email_notifications(
+      email_notifications(
         on_start = allowed,
         on_success = allowed,
         on_failure = allowed,
@@ -280,17 +280,17 @@ test_that("email notification object behaviour", {
 
   # test that valid inputs don't error
   expect_s3_class(
-    email_notif <- brickster::email_notifications(
+    email_notif <- email_notifications(
       on_start = allowed,
       on_success = allowed,
       on_failure = allowed
     ),
     c("JobEmailNotifications", "list")
   )
-  expect_true(brickster::is.email_notifications(email_notif))
+  expect_true(is.email_notifications(email_notif))
 
   expect_s3_class(
-    email_notif2 <- brickster::email_notifications(
+    email_notif2 <- email_notifications(
       on_start = allowed,
       on_success = allowed,
       on_failure = allowed,
@@ -298,7 +298,7 @@ test_that("email notification object behaviour", {
     ),
     c("JobEmailNotifications", "list")
   )
-  expect_true(brickster::is.email_notifications(email_notif2))
+  expect_true(is.email_notifications(email_notif2))
 
 })
 
@@ -307,49 +307,49 @@ test_that("cluster objects behaviour", {
 
   # cluster autoscale
   expect_s3_class(
-    autoscale <- brickster::cluster_autoscale(min_workers = 2, max_workers = 4),
+    autoscale <- cluster_autoscale(min_workers = 2, max_workers = 4),
     c("AutoScale", "list")
   )
-  expect_true(brickster::is.cluster_autoscale(autoscale))
+  expect_true(is.cluster_autoscale(autoscale))
 
-  expect_error(brickster::cluster_autoscale(min_workers = 2, max_workers = 0))
-  expect_error(brickster::cluster_autoscale(min_workers = 2, max_workers = 2))
-  expect_error(brickster::cluster_autoscale(min_workers = -2, max_workers = 2))
-  expect_error(brickster::cluster_autoscale(min_workers = 2))
-  expect_error(brickster::cluster_autoscale(max_workers = 2))
+  expect_error(cluster_autoscale(min_workers = 2, max_workers = 0))
+  expect_error(cluster_autoscale(min_workers = 2, max_workers = 2))
+  expect_error(cluster_autoscale(min_workers = -2, max_workers = 2))
+  expect_error(cluster_autoscale(min_workers = 2))
+  expect_error(cluster_autoscale(max_workers = 2))
 
   # dbfs storage
   expect_s3_class(
-    dbfs <- brickster::dbfs_storage_info(destination = "/mock/storage/path"),
+    dbfs <- dbfs_storage_info(destination = "/mock/storage/path"),
     c("DbfsStorageInfo", "list")
   )
-  expect_true(brickster::is.dbfs_storage_info(dbfs))
+  expect_true(is.dbfs_storage_info(dbfs))
 
   # file storage
   expect_s3_class(
-    fs <- brickster::file_storage_info(destination = "/mock/storage/path"),
+    fs <- file_storage_info(destination = "/mock/storage/path"),
     c("DbfsStorageInfo", "list")
   )
-  expect_true(brickster::is.file_storage_info(fs))
+  expect_true(is.file_storage_info(fs))
 
   # s3 storage info
   expect_s3_class(
-    s3 <- brickster::s3_storage_info(
+    s3 <- s3_storage_info(
       destination = "s3://mock/bucket/path",
       region = "ap-southeast-2"
     ),
     c("S3StorageInfo", "list")
   )
-  expect_true(brickster::is.s3_storage_info(s3))
+  expect_true(is.s3_storage_info(s3))
   expect_error(
-    brickster::s3_storage_info(
+    s3_storage_info(
       destination = "s3://mock/bucket/path",
       region = "ap-southeast-2",
       encryption_type = "mock_encrypton"
     )
   )
   expect_length(
-    brickster::s3_storage_info(
+    s3_storage_info(
      destination = "s3://mock/bucket/path",
      region = "ap-southeast-2",
      encryption_type = c("sse-s3", "sse-kms")
@@ -359,72 +359,72 @@ test_that("cluster objects behaviour", {
 
   # cluster log conf
   expect_s3_class(
-    clc_dbfs <- brickster::cluster_log_conf(dbfs = dbfs),
+    clc_dbfs <- cluster_log_conf(dbfs = dbfs),
     c("ClusterLogConf", "list")
   )
-  expect_true(brickster::is.cluster_log_conf(clc_dbfs))
+  expect_true(is.cluster_log_conf(clc_dbfs))
 
   expect_s3_class(
-    clc_s3 <- brickster::cluster_log_conf(s3 = s3),
+    clc_s3 <- cluster_log_conf(s3 = s3),
     c("ClusterLogConf", "list")
   )
-  expect_true(brickster::is.cluster_log_conf(clc_s3))
+  expect_true(is.cluster_log_conf(clc_s3))
 
-  expect_error(brickster::cluster_log_conf(dbfs = dbfs, s3 = s3))
-  expect_error(brickster::cluster_log_conf(s3 = dbfs))
-  expect_error(brickster::cluster_log_conf(dbfs = s3))
-  expect_error(brickster::cluster_log_conf(dbfs = fs))
-  expect_error(brickster::cluster_log_conf(s3 = fs))
-  expect_error(brickster::cluster_log_conf())
+  expect_error(cluster_log_conf(dbfs = dbfs, s3 = s3))
+  expect_error(cluster_log_conf(s3 = dbfs))
+  expect_error(cluster_log_conf(dbfs = s3))
+  expect_error(cluster_log_conf(dbfs = fs))
+  expect_error(cluster_log_conf(s3 = fs))
+  expect_error(cluster_log_conf())
 
   # docker image
-  mock_image <- brickster::docker_image("mock_url", "mock_user", "mock_pass")
+  mock_image <- docker_image("mock_url", "mock_user", "mock_pass")
   expect_s3_class(mock_image, c("DockerImage", "list"))
-  expect_true(brickster::is.docker_image(mock_image))
-  expect_false(brickster::is.docker_image(list()))
-  expect_error(brickster::docker_image())
-  expect_error(brickster::docker_image(list()))
+  expect_true(is.docker_image(mock_image))
+  expect_false(is.docker_image(list()))
+  expect_error(docker_image())
+  expect_error(docker_image(list()))
 
   # init script
   expect_s3_class(
-    init <- brickster::init_script_info(s3, dbfs, fs),
+    init <- init_script_info(s3, dbfs, fs),
     c("InitScriptInfo", "list")
   )
   expect_s3_class(
     init_script_info(),
     c("InitScriptInfo", "list")
   )
-  expect_true(brickster::is.init_script_info(init))
+  expect_true(is.init_script_info(init))
 
-  expect_error(brickster::init_script_info(1))
-  expect_error(brickster::init_script_info("a"))
-  expect_error(brickster::init_script_info(fs, 1))
+  expect_error(init_script_info(1))
+  expect_error(init_script_info("a"))
+  expect_error(init_script_info(fs, 1))
 
   ## cloud attributes
   # gcp
-  expect_s3_class(brickster::gcp_attributes(), c("GcpAttributes", "list"))
-  expect_true(brickster::is.gcp_attributes(brickster::gcp_attributes()))
+  expect_s3_class(gcp_attributes(), c("GcpAttributes", "list"))
+  expect_true(is.gcp_attributes(gcp_attributes()))
 
   # aws
-  expect_s3_class(brickster::aws_attributes(), c("AwsAttributes", "list"))
-  expect_true(brickster::is.aws_attributes(brickster::aws_attributes()))
+  expect_s3_class(aws_attributes(), c("AwsAttributes", "list"))
+  expect_true(is.aws_attributes(aws_attributes()))
 
   # azure
-  expect_s3_class(brickster::azure_attributes(), c("AzureAttributes", "list"))
-  expect_true(brickster::is.azure_attributes(brickster::azure_attributes()))
-  expect_error(brickster::azure_attributes(first_on_demand = -1))
-  expect_error(brickster::azure_attributes(first_on_demand = 0))
+  expect_s3_class(azure_attributes(), c("AzureAttributes", "list"))
+  expect_true(is.azure_attributes(azure_attributes()))
+  expect_error(azure_attributes(first_on_demand = -1))
+  expect_error(azure_attributes(first_on_demand = 0))
 
   # new cluster
   # TODO: add more checks, but should add more logic to `new_cluster()`
   cloud_attr_types <- list(
-    brickster::aws_attributes(),
-    brickster::gcp_attributes(),
-    brickster::azure_attributes()
+    aws_attributes(),
+    gcp_attributes(),
+    azure_attributes()
   )
 
   for (cloud in cloud_attr_types) {
-    cluster <- brickster::new_cluster(
+    cluster <- new_cluster(
       num_workers = 1,
       spark_version = "mock_spark_version",
       node_type_id = "mock_node_type_id",
@@ -432,11 +432,11 @@ test_that("cluster objects behaviour", {
       cloud_attrs = cloud
     )
     expect_s3_class(cluster, c("NewCluster", "list"))
-    expect_true(brickster::is.new_cluster(cluster))
+    expect_true(is.new_cluster(cluster))
   }
 
   expect_error(
-    brickster::new_cluster(
+    new_cluster(
       num_workers = 1,
       spark_version = "mock_spark_version",
       node_type_id = "mock_node_type_id",
@@ -449,21 +449,21 @@ test_that("cluster objects behaviour", {
 
 test_that("git_source behaviour", {
 
-  gs_git_tag <- brickster::git_source(
+  gs_git_tag <- git_source(
     git_url = "mockUrl",
     git_provider = "github",
     reference = "a",
     type = "tag"
   )
 
-  gs_git_branch <- brickster::git_source(
+  gs_git_branch <- git_source(
     git_url = "mockUrl",
     git_provider = "github",
     reference = "a",
     type = "branch"
   )
 
-  gs_git_commit <- brickster::git_source(
+  gs_git_commit <- git_source(
     git_url = "mockUrl",
     git_provider = "github",
     reference = "a",
@@ -474,12 +474,12 @@ test_that("git_source behaviour", {
   expect_s3_class(gs_git_branch, c("GitSource", "list"))
   expect_s3_class(gs_git_commit, c("GitSource", "list"))
 
-  expect_true(brickster::is.git_source(gs_git_tag))
-  expect_true(brickster::is.git_source(gs_git_branch))
-  expect_true(brickster::is.git_source(gs_git_commit))
+  expect_true(is.git_source(gs_git_tag))
+  expect_true(is.git_source(gs_git_branch))
+  expect_true(is.git_source(gs_git_commit))
 
   expect_error(
-    brickster::git_source(
+    git_source(
       git_url = "mockUrl",
       git_provider = "fake",
       reference = "a",
@@ -488,7 +488,7 @@ test_that("git_source behaviour", {
   )
 
   expect_error(
-    brickster::git_source(
+    git_source(
       git_provider = "fake",
       reference = "a",
       type = "commit"
@@ -496,14 +496,14 @@ test_that("git_source behaviour", {
   )
 
   expect_error(
-    brickster::git_source(
+    git_source(
       git_url = "mockUrl",
       reference = "a",
       type = "commit"
     )
   )
 
-  expect_error(brickster::git_source())
+  expect_error(git_source())
 
 })
 
diff --git a/tests/testthat/test-request-helpers.R b/tests/testthat/test-request-helpers.R
index b659717..a85bb03 100644
--- a/tests/testthat/test-request-helpers.R
+++ b/tests/testthat/test-request-helpers.R
@@ -8,7 +8,7 @@ test_that("request helpers - building requests", {
   body <- list(a = 1, b = 2)
 
   expect_no_condition({
-    req <- brickster:::db_request(
+    req <- db_request(
       endpoint = endpoint,
       method = method,
      version = endpoint_version,
diff --git a/tests/testthat/test-unity-catalog.R b/tests/testthat/test-unity-catalog.R
index 5555c49..c5d1680 100644
--- a/tests/testthat/test-unity-catalog.R
+++ b/tests/testthat/test-unity-catalog.R
@@ -69,6 +69,44 @@ test_that("Unity Catalog API - don't perform", {
   )
   expect_s3_class(resp_table_get, "httr2_request")
 
+  resp_models_get <- db_uc_models_get(
+    catalog = "some_catalog",
+    schema = "some_schema",
+    model = "some_model",
+    perform_request = F
+  )
+  expect_s3_class(resp_models_get, "httr2_request")
+
+  resp_models_list <- db_uc_models_list(
+    catalog = "some_catalog",
+    schema = "some_schema",
+    perform_request = F
+  )
+  expect_s3_class(resp_models_list, "httr2_request")
+
+  resp_funcs_get <- db_uc_funcs_get(
+    catalog = "some_catalog",
+    schema = "some_schema",
+    func = "some_func",
+    perform_request = F
+  )
+  expect_s3_class(resp_funcs_get, "httr2_request")
+
+  resp_funcs_list <- db_uc_funcs_list(
+    catalog = "some_catalog",
+    schema = "some_schema",
+    perform_request = F
+  )
+  expect_s3_class(resp_funcs_list, "httr2_request")
+
+  resp_volumes_list <- db_uc_volumes_list(
+    catalog = "some_catalog",
+    schema = "some_schema",
+    perform_request = F
+  )
+  expect_s3_class(resp_volumes_list, "httr2_request")
+
+
 })
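Taken together, the changeset lets the Connections pane walk catalog, then schema, then tables/volumes/models/funcs, and the same traversal can be scripted against the helpers added here. A sketch assuming a Unity-Catalog-enabled workspace and placeholder names ("main"/"default"):

library(brickster)

# list registered models in a schema, then fetch version metadata for the
# first one (each list element exposes a `name` field, as the pane helpers
# above rely on)
models <- db_uc_models_list(catalog = "main", schema = "default")
if (length(models) > 0) {
  versions <- db_uc_model_versions_get(
    catalog = "main",
    schema = "default",
    model = models[[1]]$name
  )
}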