diff --git a/Cargo.lock b/Cargo.lock
index 769d58adf3b2a..16f3929d1c1c7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -306,6 +306,7 @@ checksum = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
 name = "catalog"
 version = "0.1.0"
 dependencies = [
+ "backtrace",
  "bincode",
  "dataflow-types",
  "expr",
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index 73cc50d44345b..1bc0f37e9f859 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -9,6 +9,7 @@ publish = false
 path = "lib.rs"
 
 [dependencies]
+backtrace = "0.3.43"
 bincode = { version = "1.2", optional = true }
 dataflow-types = { path = "../dataflow-types" }
 expr = { path = "../expr" }
diff --git a/src/catalog/error.rs b/src/catalog/error.rs
new file mode 100644
index 0000000000000..3f1992f835c98
--- /dev/null
+++ b/src/catalog/error.rs
@@ -0,0 +1,99 @@
+// Copyright Materialize, Inc. All rights reserved.
+//
+// Use of this software is governed by the Business Source License
+// included in the LICENSE file.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0.
+
+use std::fmt;
+
+use backtrace::Backtrace;
+
+#[derive(Debug)]
+pub struct Error {
+    kind: ErrorKind,
+    backtrace: Backtrace,
+}
+
+#[derive(Debug)]
+pub(crate) enum ErrorKind {
+    Corruption { detail: String },
+    IdExhaustion,
+    UnknownDatabase(String),
+    UnknownSchema(String),
+    UnknownItem(String),
+    DatabaseAlreadyExists(String),
+    SchemaAlreadyExists(String),
+    ItemAlreadyExists(String),
+    UnacceptableSchemaName(String),
+    ReadOnlySystemSchema(String),
+    UnsatisfiableLoggingDependency { depender_name: String },
+    Storage(rusqlite::Error),
+}
+
+impl Error {
+    pub(crate) fn new(kind: ErrorKind) -> Error {
+        Error {
+            kind,
+            backtrace: Backtrace::new_unresolved(),
+        }
+    }
+}
+
+impl From<rusqlite::Error> for Error {
+    fn from(e: rusqlite::Error) -> Error {
+        Error::new(ErrorKind::Storage(e))
+    }
+}
+
+impl std::error::Error for Error {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match &self.kind {
+            ErrorKind::Corruption { .. }
+            | ErrorKind::IdExhaustion
+            | ErrorKind::UnknownDatabase(_)
+            | ErrorKind::UnknownSchema(_)
+            | ErrorKind::UnknownItem(_)
+            | ErrorKind::DatabaseAlreadyExists(_)
+            | ErrorKind::SchemaAlreadyExists(_)
+            | ErrorKind::ItemAlreadyExists(_)
+            | ErrorKind::UnacceptableSchemaName(_)
+            | ErrorKind::ReadOnlySystemSchema(_)
+            | ErrorKind::UnsatisfiableLoggingDependency { .. } => None,
+            ErrorKind::Storage(e) => Some(e),
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match &self.kind {
+            ErrorKind::Corruption { detail } => write!(f, "corrupt catalog: {}", detail),
+            ErrorKind::IdExhaustion => write!(f, "id counter overflows i64"),
+            ErrorKind::UnknownDatabase(name) => write!(f, "unknown database '{}'", name),
+            ErrorKind::UnknownSchema(name) => write!(f, "unknown schema '{}'", name),
+            ErrorKind::UnknownItem(name) => write!(f, "unknown catalog item '{}'", name),
+            ErrorKind::DatabaseAlreadyExists(name) => {
+                write!(f, "database '{}' already exists", name)
+            }
+            ErrorKind::SchemaAlreadyExists(name) => write!(f, "schema '{}' already exists", name),
+            ErrorKind::ItemAlreadyExists(name) => {
+                write!(f, "catalog item '{}' already exists", name)
+            }
+            ErrorKind::UnacceptableSchemaName(name) => {
+                write!(f, "unacceptable schema name '{}'", name)
+            }
+            ErrorKind::ReadOnlySystemSchema(name) => {
+                write!(f, "system schema '{}' cannot be modified", name)
+            }
+            ErrorKind::UnsatisfiableLoggingDependency { depender_name } => write!(
+                f,
+                "catalog item '{}' depends on system logging, but logging is disabled",
+                depender_name
+            ),
+            ErrorKind::Storage(e) => write!(f, "sqlite error: {}", e),
+        }
+    }
+}
diff --git a/src/catalog/lib.rs b/src/catalog/lib.rs
index 14b3b0f5f929f..2a867bd2e4373 100644
--- a/src/catalog/lib.rs
+++ b/src/catalog/lib.rs
@@ -22,10 +22,11 @@ use dataflow_types::{SinkConnector, SourceConnector};
 use expr::{EvalEnv, GlobalId, Id, IdHumanizer, OptimizedRelationExpr, ScalarExpr};
 use repr::RelationDesc;
 
+use crate::error::{Error, ErrorKind};
 use crate::names::{DatabaseSpecifier, FullName, PartialName};
 
+mod error;
 pub mod names;
-
 pub mod sql;
 
 /// A `Catalog` keeps track of the SQL objects known to the planner.
@@ -205,7 +206,7 @@ impl Catalog {
     /// Opens or creates a `Catalog` that stores data at `path`. The
     /// `initialize` callback will be invoked after database and schemas are
     /// loaded but before any persisted user items are loaded.
-    pub fn open<S, F>(path: Option<&Path>, f: F) -> Result<Catalog, failure::Error>
+    pub fn open<S, F>(path: Option<&Path>, f: F) -> Result<Catalog, Error>
     where
         S: CatalogItemSerializer,
         F: FnOnce(&mut Self),
@@ -267,15 +268,20 @@ impl Catalog {
             // safely even if the error message we're sniffing out changes.
             lazy_static! {
                 static ref LOGGING_ERROR: Regex =
-                    Regex::new("catalog item 'mz_catalog.[^']*' does not exist").unwrap();
+                    Regex::new("unknown catalog item 'mz_catalog.[^']*'").unwrap();
             }
             let item = match S::deserialize(&catalog, def) {
                 Ok(item) => item,
-                Err(e) if LOGGING_ERROR.is_match(&e.to_string()) => bail!(
-                    "catalog item '{}' depends on system logging, but logging is disabled",
-                    name
-                ),
-                Err(e) => bail!("corrupt catalog: failed to deserialize item: {}", e),
+                Err(e) if LOGGING_ERROR.is_match(&e.to_string()) => {
+                    return Err(Error::new(ErrorKind::UnsatisfiableLoggingDependency {
+                        depender_name: name.to_string(),
+                    }));
+                }
+                Err(e) => {
+                    return Err(Error::new(ErrorKind::Corruption {
+                        detail: format!("failed to deserialize item: {}", e),
+                    }))
+                }
             };
             catalog.insert_item(id, name, item);
         }
@@ -301,7 +307,7 @@ impl Catalog {
         self.storage.clone()
     }
 
-    pub fn allocate_id(&mut self) -> Result<GlobalId, failure::Error> {
+    pub fn allocate_id(&mut self) -> Result<GlobalId, Error> {
         self.storage().allocate_id()
     }
 
@@ -315,7 +321,7 @@ impl Catalog {
         current_database: DatabaseSpecifier,
         search_path: &[&str],
         name: &PartialName,
-    ) -> Result<FullName, failure::Error> {
+    ) -> Result<FullName, Error> {
         if let (Some(database_name), Some(schema_name)) = (&name.database, &name.schema) {
             // `name` is fully specified already. No resolution required.
             return Ok(FullName {
@@ -350,7 +356,7 @@ impl Catalog {
             }
         }
 
-        bail!("catalog item '{}' does not exist", name);
+        Err(Error::new(ErrorKind::UnknownItem(name.to_string())))
     }
 
     /// Returns the named catalog item, if it exists.
@@ -366,9 +372,9 @@ impl Catalog {
     /// Returns the named catalog item, or an error if it does not exist.
     ///
     /// See also [`Catalog::try_get`].
-    pub fn get(&self, name: &FullName) -> Result<&CatalogEntry, failure::Error> {
+    pub fn get(&self, name: &FullName) -> Result<&CatalogEntry, Error> {
         self.try_get(name)
-            .ok_or_else(|| failure::err_msg(format!("catalog item '{}' does not exist", name)))
+            .ok_or_else(|| Error::new(ErrorKind::UnknownItem(name.to_string())))
     }
 
     pub fn get_by_id(&self, id: &GlobalId) -> &CatalogEntry {
@@ -383,7 +389,7 @@ impl Catalog {
     pub fn database_resolver<'a>(
         &'a self,
         database_spec: DatabaseSpecifier,
-    ) -> Result<DatabaseResolver<'a>, failure::Error> {
+    ) -> Result<DatabaseResolver<'a>, Error> {
         match &database_spec {
             DatabaseSpecifier::Ambient => Ok(DatabaseResolver {
                 database_spec,
@@ -396,7 +402,7 @@ impl Catalog {
                     database,
                     ambient_schemas: &self.ambient_schemas,
                 }),
-                None => bail!("unknown database '{}'", name),
+                None => Err(Error::new(ErrorKind::UnknownDatabase(name.to_owned()))),
             },
         }
     }
@@ -526,7 +532,7 @@ impl Catalog {
         }
     }
 
-    pub fn transact(&mut self, ops: Vec<Op>) -> Result<Vec<OpStatus>, failure::Error> {
+    pub fn transact(&mut self, ops: Vec<Op>) -> Result<Vec<OpStatus>, Error> {
         trace!("transact: {:?}", ops);
 
         #[derive(Debug, Clone)]
@@ -569,12 +575,12 @@ impl Catalog {
                     schema_name,
                 } => {
                     if schema_name.starts_with("mz_") || schema_name.starts_with("pg_") {
-                        bail!("unacceptable schema name '{}'", schema_name);
+                        return Err(Error::new(ErrorKind::UnacceptableSchemaName(schema_name)));
                     }
                     let (database_id, database_name) = match database_name {
                         DatabaseSpecifier::Name(name) => (tx.load_database_id(&name)?, name),
                         DatabaseSpecifier::Ambient => {
-                            bail!("writing to {} is not allowed", schema_name)
+                            return Err(Error::new(ErrorKind::ReadOnlySystemSchema(schema_name)));
                         }
                     };
                     Action::CreateSchema {
@@ -587,7 +593,9 @@ impl Catalog {
                     let database_id = match &name.database {
                         DatabaseSpecifier::Name(name) => tx.load_database_id(&name)?,
                         DatabaseSpecifier::Ambient => {
-                            bail!("writing to {} is not allowed", name.schema)
+                            return Err(Error::new(ErrorKind::ReadOnlySystemSchema(
+                                name.to_string(),
+                            )));
                         }
                     };
                     let schema_id = tx.load_schema_id(database_id, &name.schema)?;
@@ -606,7 +614,7 @@ impl Catalog {
                     let (database_id, database_name) = match database_name {
                         DatabaseSpecifier::Name(name) => (tx.load_database_id(&name)?, name),
                         DatabaseSpecifier::Ambient => {
-                            bail!("dropping {} is not allowed", schema_name)
+                            return Err(Error::new(ErrorKind::ReadOnlySystemSchema(schema_name)));
                         }
                     };
                     tx.remove_schema(database_id, &schema_name)?;
diff --git a/src/catalog/sql.rs b/src/catalog/sql.rs
index c79c67eb7f4e9..97ebac1e90e9a 100644
--- a/src/catalog/sql.rs
+++ b/src/catalog/sql.rs
@@ -9,13 +9,13 @@
 
 use std::path::Path;
 
-use failure::bail;
 use rusqlite::params;
 use rusqlite::types::{FromSql, FromSqlError, ToSql, ToSqlOutput, Value, ValueRef};
 use serde::{Deserialize, Serialize};
 
 use expr::GlobalId;
 
+use crate::error::{Error, ErrorKind};
 use crate::names::{DatabaseSpecifier, FullName};
 
 const APPLICATION_ID: i32 = 0x1854_47dc;
@@ -67,7 +67,7 @@ pub struct Connection {
 }
 
 impl Connection {
-    pub fn open(path: Option<&Path>) -> Result<Connection, failure::Error> {
+    pub fn open(path: Option<&Path>) -> Result<Connection, Error> {
         let mut sqlite = match path {
             Some(path) => rusqlite::Connection::open(path)?,
             None => rusqlite::Connection::open_in_memory()?,
@@ -85,17 +85,19 @@ impl Connection {
         } else if app_id == APPLICATION_ID {
             false
         } else {
-            bail!("incorrect application_id in catalog");
+            return Err(Error::new(ErrorKind::Corruption {
+                detail: "catalog file has incorrect application_id".into(),
+            }));
         };
         tx.commit()?;
 
         Ok(Connection { inner: sqlite })
     }
 
-    pub fn load_databases(&self) -> Result<Vec<(i64, String)>, failure::Error> {
+    pub fn load_databases(&self) -> Result<Vec<(i64, String)>, Error> {
         self.inner
             .prepare("SELECT id, name FROM databases")?
-            .query_and_then(params![], |row| -> Result<_, failure::Error> {
+            .query_and_then(params![], |row| -> Result<_, Error> {
                 let id: i64 = row.get(0)?;
                 let name: String = row.get(1)?;
                 Ok((id, name))
@@ -103,14 +105,14 @@ impl Connection {
             .collect()
     }
 
-    pub fn load_schemas(&self) -> Result<Vec<(i64, Option<String>, String)>, failure::Error> {
+    pub fn load_schemas(&self) -> Result<Vec<(i64, Option<String>, String)>, Error> {
         self.inner
             .prepare(
                 "SELECT schemas.id, databases.name, schemas.name
                 FROM schemas
                 LEFT JOIN databases ON schemas.database_id = databases.id",
             )?
-            .query_and_then(params![], |row| -> Result<_, failure::Error> {
+            .query_and_then(params![], |row| -> Result<_, Error> {
                 let id: i64 = row.get(0)?;
                 let database_name: Option<String> = row.get(1)?;
                 let schema_name: String = row.get(2)?;
@@ -119,7 +121,7 @@ impl Connection {
             .collect()
     }
 
-    pub fn load_items(&self) -> Result<Vec<(GlobalId, FullName, Vec<u8>)>, failure::Error> {
+    pub fn load_items(&self) -> Result<Vec<(GlobalId, FullName, Vec<u8>)>, Error> {
         self.inner
             .prepare(
                 "SELECT items.gid, databases.name, schemas.name, items.name, items.definition
@@ -128,7 +130,7 @@ impl Connection {
                 JOIN databases ON schemas.database_id = databases.id
                 ORDER BY items.rowid",
             )?
-            .query_and_then(params![], |row| -> Result<_, failure::Error> {
+            .query_and_then(params![], |row| -> Result<_, Error> {
                 let id: SqlVal<GlobalId> = row.get(0)?;
                 let database: Option<String> = row.get(1)?;
                 let schema: String = row.get(2)?;
@@ -155,7 +157,7 @@ impl Connection {
         self.inner.prepare_cached(sql)
     }
 
-    pub fn allocate_id(&mut self) -> Result<GlobalId, failure::Error> {
+    pub fn allocate_id(&mut self) -> Result<GlobalId, Error> {
        let tx = self.inner.transaction()?;
        // SQLite doesn't support u64s, so we constrain ourselves to the more
        // limited range of positive i64s.
@@ -163,14 +165,14 @@ impl Connection {
             row.get(0)
         })?;
         if id == i64::max_value() {
-            bail!("catalog id exhaustion: id counter overflows an i64");
+            return Err(Error::new(ErrorKind::IdExhaustion));
         }
         tx.execute("UPDATE gid_alloc SET next_gid = ?", params![id + 1])?;
         tx.commit()?;
         Ok(GlobalId::User(id as u64))
     }
 
-    pub fn transaction(&mut self) -> Result<Transaction, failure::Error> {
+    pub fn transaction(&mut self) -> Result<Transaction, Error> {
         Ok(Transaction {
             inner: self.inner.transaction()?,
         })
@@ -182,64 +184,58 @@ pub struct Transaction<'a> {
 }
 
 impl Transaction<'_> {
-    pub fn load_database_id(&self, database_name: &str) -> Result<i64, failure::Error> {
+    pub fn load_database_id(&self, database_name: &str) -> Result<i64, Error> {
         match self
             .inner
             .prepare_cached("SELECT id FROM databases WHERE name = ?")?
             .query_row(params![database_name], |row| row.get(0))
         {
             Ok(id) => Ok(id),
-            Err(rusqlite::Error::QueryReturnedNoRows) => {
-                bail!("unknown database '{}'", database_name);
-            }
+            Err(rusqlite::Error::QueryReturnedNoRows) => Err(Error::new(
+                ErrorKind::UnknownDatabase(database_name.to_owned()),
+            )),
             Err(err) => Err(err.into()),
         }
     }
 
-    pub fn load_schema_id(
-        &self,
-        database_id: i64,
-        schema_name: &str,
-    ) -> Result<i64, failure::Error> {
+    pub fn load_schema_id(&self, database_id: i64, schema_name: &str) -> Result<i64, Error> {
         match self
             .inner
             .prepare_cached("SELECT id FROM schemas WHERE database_id = ? AND name = ?")?
             .query_row(params![database_id, schema_name], |row| row.get(0))
         {
             Ok(id) => Ok(id),
-            Err(rusqlite::Error::QueryReturnedNoRows) => bail!("unknown schema '{}'", schema_name),
+            Err(rusqlite::Error::QueryReturnedNoRows) => {
+                Err(Error::new(ErrorKind::UnknownSchema(schema_name.to_owned())))
+            }
             Err(err) => Err(err.into()),
         }
     }
 
-    pub fn insert_database(&mut self, database_name: &str) -> Result<i64, failure::Error> {
+    pub fn insert_database(&mut self, database_name: &str) -> Result<i64, Error> {
         match self
             .inner
             .prepare_cached("INSERT INTO databases (name) VALUES (?)")?
             .execute(params![database_name])
         {
             Ok(_) => Ok(self.inner.last_insert_rowid()),
-            Err(err) if is_constraint_violation(&err) => {
-                bail!("database '{}' already exists", database_name);
-            }
+            Err(err) if is_constraint_violation(&err) => Err(Error::new(
+                ErrorKind::DatabaseAlreadyExists(database_name.to_owned()),
+            )),
             Err(err) => Err(err.into()),
         }
     }
 
-    pub fn insert_schema(
-        &mut self,
-        database_id: i64,
-        schema_name: &str,
-    ) -> Result<i64, failure::Error> {
+    pub fn insert_schema(&mut self, database_id: i64, schema_name: &str) -> Result<i64, Error> {
         match self
             .inner
             .prepare_cached("INSERT INTO schemas (database_id, name) VALUES (?, ?)")?
             .execute(params![database_id, schema_name])
         {
             Ok(_) => Ok(self.inner.last_insert_rowid()),
-            Err(err) if is_constraint_violation(&err) => {
-                bail!("schema '{}' already exists", schema_name);
-            }
+            Err(err) if is_constraint_violation(&err) => Err(Error::new(
+                ErrorKind::SchemaAlreadyExists(schema_name.to_owned()),
+            )),
             Err(err) => Err(err.into()),
         }
     }
@@ -250,7 +246,7 @@ impl Transaction<'_> {
         schema_id: i64,
         item_name: &str,
         item: &[u8],
-    ) -> Result<(), failure::Error> {
+    ) -> Result<(), Error> {
         match self
             .inner
             .prepare_cached(
@@ -259,47 +255,50 @@ impl Transaction<'_> {
             .execute(params![SqlVal(&id), schema_id, item_name, item])
         {
             Ok(_) => Ok(()),
-            Err(err) if is_constraint_violation(&err) => {
-                bail!("catalog item '{}' already exists", item_name);
-            }
+            Err(err) if is_constraint_violation(&err) => Err(Error::new(
+                ErrorKind::ItemAlreadyExists(item_name.to_owned()),
+            )),
             Err(err) => Err(err.into()),
         }
     }
 
-    pub fn remove_database(&self, name: &str) -> Result<(), failure::Error> {
+    pub fn remove_database(&self, name: &str) -> Result<(), Error> {
         let n = self
             .inner
             .prepare_cached("DELETE FROM databases WHERE name = ?")?
             .execute(params![name])?;
         assert!(n <= 1);
-        if n != 1 {
-            bail!("database '{}' does not exist", name);
+        if n == 1 {
+            Ok(())
+        } else {
+            Err(Error::new(ErrorKind::UnknownDatabase(name.to_owned())))
         }
-        Ok(())
     }
 
-    pub fn remove_schema(&self, database_id: i64, schema_name: &str) -> Result<(), failure::Error> {
+    pub fn remove_schema(&self, database_id: i64, schema_name: &str) -> Result<(), Error> {
         let n = self
             .inner
             .prepare_cached("DELETE FROM schemas WHERE database_id = ? AND name = ?")?
             .execute(params![database_id, schema_name])?;
         assert!(n <= 1);
-        if n != 1 {
-            bail!("schema '{}' does not exist", schema_name);
+        if n == 1 {
+            Ok(())
+        } else {
+            Err(Error::new(ErrorKind::UnknownSchema(schema_name.to_owned())))
         }
-        Ok(())
     }
 
-    pub fn remove_item(&self, id: GlobalId) -> Result<(), failure::Error> {
+    pub fn remove_item(&self, id: GlobalId) -> Result<(), Error> {
         let n = self
             .inner
             .prepare_cached("DELETE FROM items WHERE gid = ?")?
             .execute(params![SqlVal(id)])?;
         assert!(n <= 1);
-        if n != 1 {
-            bail!("item {} does not exist", id);
+        if n == 1 {
+            Ok(())
+        } else {
+            Err(Error::new(ErrorKind::UnknownItem(id.to_string())))
         }
-        Ok(())
     }
 
     pub fn commit(self) -> Result<(), rusqlite::Error> {
diff --git a/src/sql/statement.rs b/src/sql/statement.rs
index dc70476c46bac..dc9deab6c85b8 100644
--- a/src/sql/statement.rs
+++ b/src/sql/statement.rs
@@ -1194,7 +1194,7 @@ fn handle_drop_database(
             // TODO(benesch): generate a notice indicating that the database
             // does not exist.
         }
-        Err(err) => return Err(err),
+        Err(err) => return Err(err.into()),
     }
     Ok(Plan::DropDatabase { name })
 }
@@ -1255,7 +1255,7 @@ fn handle_drop_schema(
             // TODO(benesch): generate a notice indicating that the
             // database does not exist.
         }
-        Err(err) => return Err(err),
+        Err(err) => return Err(err.into()),
     }
     Ok(Plan::DropSchema {
         database_name,
@@ -1337,7 +1337,7 @@ fn handle_drop_item(
             // item does not exist.
             Ok(None)
         }
-        Err(err) => Err(err),
+        Err(err) => Err(err.into()),
     }
 }
 
@@ -1600,7 +1600,8 @@ impl<'a> StatementContext<'a> {
 
     pub fn resolve_name(&self, name: ObjectName) -> Result<FullName, failure::Error> {
         let name = normalize::object_name(name)?;
-        self.catalog
-            .resolve(self.session.database(), self.session.search_path(), &name)
+        Ok(self
+            .catalog
+            .resolve(self.session.database(), self.session.search_path(), &name)?)
     }
 }
diff --git a/test/sqllogictest/cockroach/case_sensitive_names.slt b/test/sqllogictest/cockroach/case_sensitive_names.slt
index 3546564f47a8e..e18aae6991a0c 100644
--- a/test/sqllogictest/cockroach/case_sensitive_names.slt
+++ b/test/sqllogictest/cockroach/case_sensitive_names.slt
@@ -45,7 +45,7 @@ mode cockroach
 statement ok
 CREATE TABLE A(x INT)
 
-statement error pgcode 42P01 catalog item 'A' does not exist
+statement error pgcode 42P01 unknown catalog item 'A'
 SHOW COLUMNS FROM "A"
 
 # statement error pgcode 42P01 catalog item '"A"' does not exist
@@ -63,7 +63,7 @@ SHOW COLUMNS FROM "A"
 # statement error pgcode 42P01 catalog item '"A"' does not exist
 # SHOW CONSTRAINTS FROM "A"
 
-statement error pgcode 42P01 catalog item 'A' does not exist
+statement error pgcode 42P01 unknown catalog item 'A'
 SELECT * FROM "A"
 
 # statement error pgcode 42P01 catalog item '"A"' does not exist
@@ -117,7 +117,7 @@ DROP TABLE a
 statement ok
 CREATE TABLE "B"(x INT)
 
-statement error pgcode 42P01 catalog item 'b' does not exist
+statement error pgcode 42P01 unknown catalog item 'b'
 SHOW COLUMNS FROM B
 
 # statement error pgcode 42P01 catalog item 'b' does not exist
@@ -135,7 +135,7 @@ SHOW COLUMNS FROM B
 # statement error pgcode 42P01 catalog item 'b' does not exist
 # SHOW CONSTRAINTS FROM B
 
-statement error pgcode 42P01 catalog item 'b' does not exist
+statement error pgcode 42P01 unknown catalog item 'b'
 SELECT * FROM B
 
 # statement error pgcode 42P01 catalog item 'b' does not exist
diff --git a/test/sqllogictest/index.slt b/test/sqllogictest/index.slt
index efe97601be607..554a984613e3c 100644
--- a/test/sqllogictest/index.slt
+++ b/test/sqllogictest/index.slt
@@ -76,7 +76,7 @@ materialize.public.foo materialize.public.foo_primary_idx a NULL
 materialize.public.foo materialize.public.foo_primary_idx b NULL true 2
 materialize.public.foo materialize.public.foo_primary_idx c NULL true 3
 
-query error catalog item 'nonexistent' does not exist
+query error unknown catalog item 'nonexistent'
 SHOW INDEX FROM nonexistent
 
 query error cannot show indexes on materialize.public.bar_idx because it is a index
diff --git a/test/testdrive/createdrop.td b/test/testdrive/createdrop.td
index 24dacaa3b3964..59eeb4ed9f97c 100644
--- a/test/testdrive/createdrop.td
+++ b/test/testdrive/createdrop.td
@@ -188,10 +188,10 @@ s is not of type VIEW
 # Test that drop without if exists does not work if the object does not exist
 
 ! DROP INDEX nonexistent
-catalog item 'nonexistent' does not exist
+unknown catalog item 'nonexistent'
 
 ! DROP VIEW nonexistent
-catalog item 'nonexistent' does not exist
+unknown catalog item 'nonexistent'
 
 ! DROP SOURCE nonexistent
-catalog item 'nonexistent' does not exist
+unknown catalog item 'nonexistent'
diff --git a/test/testdrive/dependencies.td b/test/testdrive/dependencies.td
index 770a10e5c7a6a..02637534d51f5 100644
--- a/test/testdrive/dependencies.td
+++ b/test/testdrive/dependencies.td
@@ -93,10 +93,10 @@ cannot drop materialize.public.test1: still depended upon by catalog item 'mater
 # rather than verifying the drop by checking whether DROP VIEW fails.
 
 ! DROP VIEW test1;
-catalog item 'test1' does not exist
+unknown catalog item 'test1'
 
 ! DROP VIEW test2;
-catalog item 'test2' does not exist
+unknown catalog item 'test2'
 
 # Test that DROP VIEW IF EXISTS succeeds even if the view does not exist.
 
@@ -110,7 +110,7 @@ catalog item 'test2' does not exist
 > DROP SOURCE s CASCADE;
 
 ! DROP VIEW test4;
-catalog item 'test4' does not exist
+unknown catalog item 'test4'
 
 > CREATE SOURCE s
   FROM KAFKA BROKER '${testdrive.kafka-addr}' TOPIC 'testdrive-data-${testdrive.seed}'
@@ -164,7 +164,7 @@ materialize.public.v2 materialize.public.v2_primary_idx x
 > DROP VIEW v2a;
 
 ! DROP VIEW v2a;
-catalog item 'v2a' does not exist
+unknown catalog item 'v2a'
 
 > SHOW INDEX in v2;
 Source_or_view Key_name Column_name Expression Null Seq_in_index
@@ -173,7 +173,7 @@ materialize.public.v2 materialize.public.i1 x false 1
 
 ! DROP INDEX i2;
-catalog item 'i2' does not exist
+unknown catalog item 'i2'
 
 > CREATE MATERIALIZED VIEW v4 AS SELECT x, y from s;
 
@@ -200,10 +200,10 @@ materialize.public.v4 materialize.public.v4_primary_idx y
 > DROP VIEW v4a CASCADE;
 
 ! DROP VIEW v4a;
-catalog item 'v4a' does not exist
+unknown catalog item 'v4a'
 
 ! DROP INDEX i3;
-catalog item 'i3' does not exist
+unknown catalog item 'i3'
 
 > SHOW INDEX in v4;
 Source_or_view Key_name Column_name Expression Null Seq_in_index
@@ -235,16 +235,16 @@ materialize.public.multicol materialize.public.i6 d fa
 > DROP VIEW v4 CASCADE;
 
 ! DROP VIEW v4;
-catalog item 'v4' does not exist
+unknown catalog item 'v4'
 
 ! DROP INDEX i5;
-catalog item 'i5' does not exist
+unknown catalog item 'i5'
 
 ! DROP VIEW v5;
-catalog item 'v5' does not exist
+unknown catalog item 'v5'
 
 ! DROP INDEX i4;
-catalog item 'i4' does not exist
+unknown catalog item 'i4'
 
 # Test that dropping indexes even with cascade does not cause the underlying view to be dropped
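As a quick illustration of how the new error type behaves, here is a minimal sketch of a unit test that could sit inside `src/catalog/error.rs`. The test module and assertions are not part of the patch; they are assumptions based only on the `Display`, `From<rusqlite::Error>`, and `std::error::Error` impls shown above.

```rust
// Hypothetical test module for src/catalog/error.rs; not part of the patch above.
#[cfg(test)]
mod tests {
    use std::error::Error as _;

    use super::{Error, ErrorKind};

    #[test]
    fn unknown_item_formats_and_has_no_source() {
        // ErrorKind is pub(crate), so constructing errors this way only
        // works from inside the catalog crate itself.
        let err = Error::new(ErrorKind::UnknownItem("mz_catalog.foo".into()));
        assert_eq!(err.to_string(), "unknown catalog item 'mz_catalog.foo'");
        assert!(err.source().is_none());
    }

    #[test]
    fn sqlite_errors_are_wrapped_with_a_source() {
        // From<rusqlite::Error> routes storage failures through
        // ErrorKind::Storage, the one variant that reports a source.
        let err = Error::from(rusqlite::Error::QueryReturnedNoRows);
        assert!(err.to_string().starts_with("sqlite error:"));
        assert!(err.source().is_some());
    }
}
```

Because `ErrorKind` stays `pub(crate)`, callers outside the crate interact with these errors only through their `Display` output and `source()` chain, which is why the sqllogictest and testdrive expectations above key off the new message text.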