PostgreSQL driver, tests against DB backends, and general drivers cleanup (#2723)

* PostgreSQL driver and general drivers cleanup

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Make tests pass

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Add black --target-version flag in make.bat

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Rewrite postgres driver

Most of the logic is now in PL/pgSQL.

This completely avoids the use of Python f-strings to format identifiers into queries. Although an SQL-injection attack would have been impossible anyway (only the owner would have ever had the ability to do that), using PostgreSQL's format() is more reliable for unusual identifiers. Performance-wise, I'm not sure whether this is an improvement, but I highly doubt that it's worse.

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>
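
A minimal illustration of the identifier-quoting point above (the schema name is hypothetical): PostgreSQL's format() with the %I specifier double-quotes an identifier whenever it needs quoting, which naive string interpolation would not do:

    SELECT format('CREATE SCHEMA IF NOT EXISTS %I', 'My Cog.42');
    -- Result: CREATE SCHEMA IF NOT EXISTS "My Cog.42"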

* Reformat

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Fix PostgresDriver.delete_all_data()

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Clean up PL/pgSQL code

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* More PL/pgSQL cleanup

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* PL/pgSQL function optimisations

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Ensure compatibility with PostgreSQL 10 and below

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* More/better docstrings for PG functions

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Fix typo in docstring

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Return correct value on toggle()

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Use composite type for PG function parameters

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Fix JSON driver's Config.clear_all()

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Correct description for Mongo tox recipe

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Fix linting errors

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Update dep specification after merging bumpdeps

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Add towncrier entries

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Update from merge

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Mention [postgres] extra in install docs

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Support more connection options and use better defaults

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Actually pass PG env vars in tox

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Replace event trigger with manual DELETE queries

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>
Toby Harradine
2019-08-27 12:02:26 +10:00
committed by Michael H
parent 57fa29dd64
commit d1a46acc9a
34 changed files with 2282 additions and 843 deletions


@@ -0,0 +1,3 @@
from .postgres import PostgresDriver
__all__ = ["PostgresDriver"]


@@ -0,0 +1,839 @@
/*
************************************************************
* PostgreSQL driver Data Definition Language (DDL) Script. *
************************************************************
*/
CREATE SCHEMA IF NOT EXISTS red_config;
CREATE SCHEMA IF NOT EXISTS red_utils;
DO $$
BEGIN
PERFORM 'red_config.identifier_data'::regtype;
EXCEPTION
WHEN UNDEFINED_OBJECT THEN
CREATE TYPE red_config.identifier_data AS (
cog_name text,
cog_id text,
category text,
pkeys text[],
identifiers text[],
pkey_len integer,
is_custom boolean
);
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Create the config schema and/or table if they do not exist yet.
*/
red_config.maybe_create_table(
id_data red_config.identifier_data
)
RETURNS void
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
schema_exists CONSTANT boolean := exists(
SELECT 1
FROM red_config.red_cogs t
WHERE t.cog_name = id_data.cog_name AND t.cog_id = id_data.cog_id);
table_exists CONSTANT boolean := schema_exists AND exists(
SELECT 1
FROM information_schema.tables
WHERE table_schema = schemaname AND table_name = id_data.category);
BEGIN
IF NOT schema_exists THEN
PERFORM red_config.create_schema(id_data.cog_name, id_data.cog_id);
END IF;
IF NOT table_exists THEN
PERFORM red_config.create_table(id_data);
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Create the config schema for the given cog.
*/
red_config.create_schema(new_cog_name text, new_cog_id text, OUT schemaname text)
RETURNS text
LANGUAGE 'plpgsql'
AS $$
BEGIN
schemaname := concat_ws('.', new_cog_name, new_cog_id);
EXECUTE format('CREATE SCHEMA IF NOT EXISTS %I', schemaname);
INSERT INTO red_config.red_cogs AS t VALUES(new_cog_name, new_cog_id, schemaname)
ON CONFLICT(cog_name, cog_id) DO UPDATE
SET
schemaname = excluded.schemaname;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Create the config table for the given category.
*/
red_config.create_table(id_data red_config.identifier_data)
RETURNS void
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
constraintname CONSTANT text := id_data.category||'_pkey';
pkey_columns CONSTANT text := red_utils.gen_pkey_columns(1, id_data.pkey_len);
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
pkey_column_definitions CONSTANT text := red_utils.gen_pkey_column_definitions(
1, id_data.pkey_len, pkey_type);
BEGIN
EXECUTE format(
$query$
CREATE TABLE IF NOT EXISTS %I.%I (
%s,
json_data jsonb DEFAULT '{}' NOT NULL,
CONSTRAINT %I PRIMARY KEY (%s)
)
$query$,
schemaname,
id_data.category,
pkey_column_definitions,
constraintname,
pkey_columns);
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Get config data.
*
* - When `pkeys` is a full primary key, all or part of a document
* will be returned.
* - When `pkeys` is not a full primary key, documents will be
* aggregated together into a single JSONB object, with primary keys
* as keys mapping to the documents.
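 *
 * For example (illustrative): with `pkey_len` 2 and `pkeys` = ['123'],
 * the result is shaped like {"456": {...}, "789": {...}}, keyed by the
 * missing primary key.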
*/
red_config.get(
id_data red_config.identifier_data,
OUT result jsonb
)
LANGUAGE 'plpgsql'
STABLE
PARALLEL SAFE
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
num_pkeys CONSTANT integer := coalesce(array_length(id_data.pkeys, 1), 0);
num_missing_pkeys CONSTANT integer := id_data.pkey_len - num_pkeys;
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
whereclause CONSTANT text := red_utils.gen_whereclause(num_pkeys, pkey_type);
missing_pkey_columns text;
BEGIN
IF num_missing_pkeys <= 0 THEN
-- No missing primary keys: we're getting all or part of a document.
EXECUTE format(
'SELECT json_data #> $2 FROM %I.%I WHERE %s',
schemaname,
id_data.category,
whereclause)
INTO result
USING id_data.pkeys, id_data.identifiers;
ELSIF num_missing_pkeys = 1 THEN
-- 1 missing primary key: we can use the built-in jsonb_object_agg() aggregate function.
EXECUTE format(
'SELECT jsonb_object_agg(%I::text, json_data) FROM %I.%I WHERE %s',
'primary_key_'||id_data.pkey_len,
schemaname,
id_data.category,
whereclause)
INTO result
USING id_data.pkeys;
ELSE
-- Multiple missing primary keys: we must use our custom red_utils.jsonb_object_agg2()
-- aggregate function.
missing_pkey_columns := red_utils.gen_pkey_columns_casted(num_pkeys + 1, id_data.pkey_len);
EXECUTE format(
'SELECT red_utils.jsonb_object_agg2(json_data, %s) FROM %I.%I WHERE %s',
missing_pkey_columns,
schemaname,
id_data.category,
whereclause)
INTO result
USING id_data.pkeys;
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Set config data.
*
* - When `pkeys` is a full primary key, all or part of a document
* will be set.
* - When `pkeys` is not a full set, multiple documents will be
* replaced or removed - `new_value` must be a JSONB object mapping
* primary keys to the new documents.
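 *
 * For example (illustrative): with `pkey_len` 2 and `pkeys` = ['123'],
 * `new_value` must be shaped like {"456": {...}, "789": {...}}, keyed
 * by the missing primary key.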
*
* Raises `error_in_assignment` error when trying to set a sub-key
* of a non-document type.
*/
red_config.set(
id_data red_config.identifier_data,
new_value jsonb
)
RETURNS void
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
constraintname CONSTANT text := id_data.category||'_pkey';
num_pkeys CONSTANT integer := coalesce(array_length(id_data.pkeys, 1), 0);
num_missing_pkeys CONSTANT integer := id_data.pkey_len - num_pkeys;
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
pkey_placeholders CONSTANT text := red_utils.gen_pkey_placeholders(num_pkeys, pkey_type);
new_document jsonb;
pkey_column_definitions text;
whereclause text;
missing_pkey_columns text;
BEGIN
PERFORM red_config.maybe_create_table(id_data);
IF num_missing_pkeys = 0 THEN
-- Setting all or part of a document
new_document := red_utils.jsonb_set2('{}', new_value, VARIADIC id_data.identifiers);
EXECUTE format(
$query$
INSERT INTO %I.%I AS t VALUES (%s, $2)
ON CONFLICT ON CONSTRAINT %I DO UPDATE
SET
json_data = red_utils.jsonb_set2(t.json_data, $3, VARIADIC $4)
$query$,
schemaname,
id_data.category,
pkey_placeholders,
constraintname)
USING id_data.pkeys, new_document, new_value, id_data.identifiers;
ELSE
-- Setting multiple documents
whereclause := red_utils.gen_whereclause(num_pkeys, pkey_type);
missing_pkey_columns := red_utils.gen_pkey_columns_casted(
num_pkeys + 1, id_data.pkey_len, pkey_type);
pkey_column_definitions := red_utils.gen_pkey_column_definitions(num_pkeys + 1, id_data.pkey_len);
-- Delete all documents which we're setting first, since we don't know whether they'll be
-- replaced by the subsequent INSERT.
EXECUTE format('DELETE FROM %I.%I WHERE %s', schemaname, id_data.category, whereclause)
USING id_data.pkeys;
-- Insert all new documents
EXECUTE format(
$query$
INSERT INTO %I.%I AS t
SELECT %s, json_data
FROM red_utils.generate_rows_from_object($2, $3) AS f(%s, json_data jsonb)
ON CONFLICT ON CONSTRAINT %I DO UPDATE
SET
json_data = excluded.json_data
$query$,
schemaname,
id_data.category,
concat_ws(', ', pkey_placeholders, missing_pkey_columns),
pkey_column_definitions,
constraintname)
USING id_data.pkeys, new_value, num_missing_pkeys;
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Clear config data.
*
* - When `identifiers` is not empty, this will clear a key from a
* document.
* - When `identifiers` is empty and `pkeys` is not empty, it will
* delete one or more documents.
* - When `pkeys` is empty, it will drop the whole table.
* - When `id_data.category` is NULL or an empty string, it will drop
* the whole schema.
*
* Has no effect when the document or key does not exist.
*/
red_config.clear(
id_data red_config.identifier_data
)
RETURNS void
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
num_pkeys CONSTANT integer := coalesce(array_length(id_data.pkeys, 1), 0);
num_identifiers CONSTANT integer := coalesce(array_length(id_data.identifiers, 1), 0);
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
whereclause text;
BEGIN
IF num_identifiers > 0 THEN
-- Popping a key from a document or nested document.
whereclause := red_utils.gen_whereclause(num_pkeys, pkey_type);
EXECUTE format(
$query$
UPDATE %I.%I AS t
SET
json_data = t.json_data #- $2
WHERE %s
$query$,
schemaname,
id_data.category,
whereclause)
USING id_data.pkeys, id_data.identifiers;
ELSIF num_pkeys > 0 THEN
-- Deleting one or many documents
whereclause := red_utils.gen_whereclause(num_pkeys, pkey_type);
EXECUTE format('DELETE FROM %I.%I WHERE %s', schemaname, id_data.category, whereclause)
USING id_data.pkeys;
ELSIF id_data.category IS NOT NULL AND id_data.category != '' THEN
-- Deleting an entire category
EXECUTE format('DROP TABLE %I.%I CASCADE', schemaname, id_data.category);
ELSE
-- Deleting an entire cog's data
EXECUTE format('DROP SCHEMA %I CASCADE', schemaname);
DELETE FROM red_config.red_cogs
WHERE cog_name = id_data.cog_name AND cog_id = id_data.cog_id;
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Increment a number within a document.
*
* If the value doesn't already exist, it is inserted as
* `default_value + amount`.
*
* Raises 'wrong_object_type' error when trying to increment a
* non-numeric value.
*/
red_config.inc(
id_data red_config.identifier_data,
amount numeric,
default_value numeric,
OUT result numeric
)
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
num_identifiers CONSTANT integer := coalesce(array_length(id_data.identifiers, 1), 0);
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
whereclause CONSTANT text := red_utils.gen_whereclause(id_data.pkey_len, pkey_type);
new_document jsonb;
existing_document jsonb;
existing_value jsonb;
pkey_placeholders text;
BEGIN
IF num_identifiers = 0 THEN
-- Without identifiers, there's no chance we're actually incrementing a number
RAISE EXCEPTION 'Cannot increment document(s)'
USING ERRCODE = 'wrong_object_type';
END IF;
PERFORM red_config.maybe_create_table(id_data);
-- Look for the existing document
EXECUTE format(
'SELECT json_data FROM %I.%I WHERE %s',
schemaname,
id_data.category,
whereclause)
INTO existing_document USING id_data.pkeys;
IF existing_document IS NULL THEN
-- We need to insert a new document
result := default_value + amount;
        new_document := red_utils.jsonb_set2('{}', to_jsonb(result), VARIADIC id_data.identifiers);
pkey_placeholders := red_utils.gen_pkey_placeholders(id_data.pkey_len, pkey_type);
EXECUTE format(
'INSERT INTO %I.%I VALUES(%s, $2)',
schemaname,
id_data.category,
pkey_placeholders)
USING id_data.pkeys, new_document;
ELSE
-- We need to update the existing document
existing_value := existing_document #> id_data.identifiers;
IF existing_value IS NULL THEN
result := default_value + amount;
ELSIF jsonb_typeof(existing_value) = 'number' THEN
result := existing_value::text::numeric + amount;
ELSE
RAISE EXCEPTION 'Cannot increment non-numeric value %', existing_value
USING ERRCODE = 'wrong_object_type';
END IF;
        new_document := red_utils.jsonb_set2(
            existing_document, to_jsonb(result), VARIADIC id_data.identifiers);
EXECUTE format(
'UPDATE %I.%I SET json_data = $2 WHERE %s',
schemaname,
id_data.category,
whereclause)
USING id_data.pkeys, new_document;
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Toggle a boolean within a document.
*
* If the value doesn't already exist, it is inserted as `NOT
* default_value`.
*
* Raises 'wrong_object_type' error when trying to toggle a
* non-boolean value.
*/
red_config.toggle(
id_data red_config.identifier_data,
default_value boolean,
OUT result boolean
)
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
num_identifiers CONSTANT integer := coalesce(array_length(id_data.identifiers, 1), 0);
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
whereclause CONSTANT text := red_utils.gen_whereclause(id_data.pkey_len, pkey_type);
new_document jsonb;
existing_document jsonb;
existing_value jsonb;
pkey_placeholders text;
BEGIN
IF num_identifiers = 0 THEN
-- Without identifiers, there's no chance we're actually toggling a boolean
        RAISE EXCEPTION 'Cannot toggle document(s)'
USING ERRCODE = 'wrong_object_type';
END IF;
PERFORM red_config.maybe_create_table(id_data);
-- Look for the existing document
EXECUTE format(
'SELECT json_data FROM %I.%I WHERE %s',
schemaname,
id_data.category,
whereclause)
INTO existing_document USING id_data.pkeys;
IF existing_document IS NULL THEN
-- We need to insert a new document
result := NOT default_value;
        new_document := red_utils.jsonb_set2('{}', to_jsonb(result), VARIADIC id_data.identifiers);
pkey_placeholders := red_utils.gen_pkey_placeholders(id_data.pkey_len, pkey_type);
EXECUTE format(
'INSERT INTO %I.%I VALUES(%s, $2)',
schemaname,
id_data.category,
pkey_placeholders)
USING id_data.pkeys, new_document;
ELSE
-- We need to update the existing document
existing_value := existing_document #> id_data.identifiers;
IF existing_value IS NULL THEN
result := NOT default_value;
ELSIF jsonb_typeof(existing_value) = 'boolean' THEN
result := NOT existing_value::text::boolean;
ELSE
            RAISE EXCEPTION 'Cannot toggle non-boolean value %', existing_value
USING ERRCODE = 'wrong_object_type';
END IF;
        new_document := red_utils.jsonb_set2(
            existing_document, to_jsonb(result), VARIADIC id_data.identifiers);
EXECUTE format(
'UPDATE %I.%I SET json_data = $2 WHERE %s',
schemaname,
id_data.category,
whereclause)
USING id_data.pkeys, new_document;
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
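/*
 * Extend an array within a document.
 *
 * If the array doesn't already exist, it is inserted as
 * `default_value` extended with `new_value`. When `max_length` is
 * exceeded, values are popped from the end opposite the one being
 * extended.
 *
 * Raises 'wrong_object_type' error when trying to extend a
 * non-array value.
 */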
red_config.extend(
id_data red_config.identifier_data,
new_value text,
default_value text,
max_length integer DEFAULT NULL,
extend_left boolean DEFAULT FALSE,
OUT result jsonb
)
LANGUAGE 'plpgsql'
AS $$
DECLARE
schemaname CONSTANT text := concat_ws('.', id_data.cog_name, id_data.cog_id);
num_identifiers CONSTANT integer := coalesce(array_length(id_data.identifiers, 1), 0);
pkey_type CONSTANT text := red_utils.get_pkey_type(id_data.is_custom);
whereclause CONSTANT text := red_utils.gen_whereclause(id_data.pkey_len, pkey_type);
pop_idx CONSTANT integer := CASE extend_left WHEN TRUE THEN -1 ELSE 0 END;
new_document jsonb;
existing_document jsonb;
existing_value jsonb;
pkey_placeholders text;
idx integer;
BEGIN
IF num_identifiers = 0 THEN
-- Without identifiers, there's no chance we're actually appending to an array
RAISE EXCEPTION 'Cannot append to document(s)'
USING ERRCODE = 'wrong_object_type';
END IF;
PERFORM red_config.maybe_create_table(id_data);
-- Look for the existing document
EXECUTE format(
'SELECT json_data FROM %I.%I WHERE %s',
schemaname,
id_data.category,
whereclause)
INTO existing_document USING id_data.pkeys;
IF existing_document IS NULL THEN
        result := default_value::jsonb || new_value::jsonb;
        new_document := red_utils.jsonb_set2('{}'::jsonb, result, VARIADIC id_data.identifiers);
pkey_placeholders := red_utils.gen_pkey_placeholders(id_data.pkey_len, pkey_type);
EXECUTE format(
'INSERT INTO %I.%I VALUES(%s, $2)',
schemaname,
id_data.category,
pkey_placeholders)
USING id_data.pkeys, new_document;
ELSE
existing_value := existing_document #> id_data.identifiers;
IF existing_value IS NULL THEN
            existing_value := default_value::jsonb;
ELSIF jsonb_typeof(existing_value) != 'array' THEN
RAISE EXCEPTION 'Cannot append to non-array value %', existing_value
USING ERRCODE = 'wrong_object_type';
END IF;
        CASE extend_left
            WHEN TRUE THEN
                result := new_value::jsonb || existing_value;
            ELSE
                result := existing_value || new_value::jsonb;
        END CASE;
IF max_length IS NOT NULL THEN
FOR idx IN SELECT generate_series(1, jsonb_array_length(result) - max_length) LOOP
result := result - pop_idx;
END LOOP;
END IF;
        new_document := red_utils.jsonb_set2(existing_document, result, VARIADIC id_data.identifiers);
EXECUTE format(
'UPDATE %I.%I SET json_data = $2 WHERE %s',
schemaname,
id_data.category,
whereclause)
USING id_data.pkeys, new_document;
END IF;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Delete all schemas listed in the red_config.red_cogs table.
*/
red_config.delete_all_schemas()
RETURNS void
LANGUAGE 'plpgsql'
AS $$
DECLARE
cog_entry record;
BEGIN
FOR cog_entry IN SELECT * FROM red_config.red_cogs t LOOP
EXECUTE format('DROP SCHEMA %I CASCADE', cog_entry.schemaname);
END LOOP;
-- Clear out red_config.red_cogs table
DELETE FROM red_config.red_cogs WHERE TRUE;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Like `jsonb_set` but will insert new objects where one is missing
* along the path.
*
* Raises `error_in_assignment` error when trying to set a sub-key
* of a non-document type.
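 *
 * For example (illustrative):
 *   red_utils.jsonb_set2('{}', '1', 'a', 'b') -> '{"a": {"b": 1}}'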
*/
red_utils.jsonb_set2(target jsonb, new_value jsonb, VARIADIC identifiers text[])
RETURNS jsonb
LANGUAGE 'plpgsql'
IMMUTABLE
PARALLEL SAFE
AS $$
DECLARE
num_identifiers CONSTANT integer := coalesce(array_length(identifiers, 1), 0);
cur_value_type text;
idx integer;
BEGIN
IF num_identifiers = 0 THEN
RETURN new_value;
END IF;
FOR idx IN SELECT generate_series(1, num_identifiers - 1) LOOP
cur_value_type := jsonb_typeof(target #> identifiers[:idx]);
IF cur_value_type IS NULL THEN
-- Parent key didn't exist in JSON before - insert new object
target := jsonb_set(target, identifiers[:idx], '{}'::jsonb);
ELSIF cur_value_type != 'object' THEN
-- We can't set the sub-field of a null, int, float, array etc.
            RAISE EXCEPTION 'Cannot set sub-field of "%"', cur_value_type
USING ERRCODE = 'error_in_assignment';
END IF;
END LOOP;
RETURN jsonb_set(target, identifiers, new_value);
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Return a set of rows to insert into a table, from a single JSONB
* object containing multiple documents.
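 *
 * For example (illustrative): with `num_missing_pkeys` = 2 and `object` =
 * '{"123": {"456": {"foo": true}}}', a single row ('123', '456',
 * '{"foo": true}') is returned.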
*/
red_utils.generate_rows_from_object(object jsonb, num_missing_pkeys integer)
RETURNS setof record
LANGUAGE 'plpgsql'
IMMUTABLE
PARALLEL SAFE
AS $$
DECLARE
pair record;
column_definitions text;
BEGIN
IF num_missing_pkeys = 1 THEN
-- Base case: Simply return (key, value) pairs
RETURN QUERY
SELECT key AS key_1, value AS json_data
FROM jsonb_each(object);
ELSE
-- We need to return (key, key, ..., value) pairs: recurse into inner JSONB objects
column_definitions := red_utils.gen_pkey_column_definitions(2, num_missing_pkeys);
FOR pair IN SELECT * FROM jsonb_each(object) LOOP
RETURN QUERY
EXECUTE format(
$query$
SELECT $1 AS key_1, *
FROM red_utils.generate_rows_from_object($2, $3)
AS f(%s, json_data jsonb)
$query$,
column_definitions)
USING pair.key, pair.value, num_missing_pkeys - 1;
END LOOP;
END IF;
RETURN;
END;
$$;
CREATE OR REPLACE FUNCTION
/*
* Get a comma-separated list of primary key placeholders.
*
* The placeholder will always be $1. Particularly useful for
* inserting values into a table from an array of primary keys.
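 *
 * For example: red_utils.gen_pkey_placeholders(2, 'bigint')
 * returns '$1[1]::bigint, $1[2]::bigint'.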
*/
red_utils.gen_pkey_placeholders(num_pkeys integer, pkey_type text DEFAULT 'text')
RETURNS text
LANGUAGE 'sql'
IMMUTABLE
PARALLEL SAFE
AS $$
SELECT string_agg(t.item, ', ')
FROM (
SELECT format('$1[%s]::%s', idx, pkey_type) AS item
FROM generate_series(1, num_pkeys) idx) t
;
$$;
CREATE OR REPLACE FUNCTION
/*
* Generate a whereclause for the given number of primary keys.
*
 * When there are no primary keys, this will simply return the
 * string 'TRUE'. When there are multiple, it will return multiple
* equality comparisons concatenated with 'AND'.
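 *
 * For example: red_utils.gen_whereclause(2, 'bigint') returns
 * 'primary_key_1 = $1[1]::bigint AND primary_key_2 = $1[2]::bigint'.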
*/
red_utils.gen_whereclause(num_pkeys integer, pkey_type text)
RETURNS text
LANGUAGE 'sql'
IMMUTABLE
PARALLEL SAFE
AS $$
SELECT coalesce(string_agg(t.item, ' AND '), 'TRUE')
FROM (
SELECT format('%I = $1[%s]::%s', 'primary_key_'||idx, idx, pkey_type) AS item
FROM generate_series(1, num_pkeys) idx) t
;
$$;
CREATE OR REPLACE FUNCTION
/*
* Generate a comma-separated list of primary key column names.
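 *
 * For example: red_utils.gen_pkey_columns(1, 3) returns
 * 'primary_key_1, primary_key_2, primary_key_3'.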
*/
red_utils.gen_pkey_columns(start integer, stop integer)
RETURNS text
LANGUAGE 'sql'
IMMUTABLE
PARALLEL SAFE
AS $$
SELECT string_agg(t.item, ', ')
FROM (
SELECT quote_ident('primary_key_'||idx) AS item
FROM generate_series(start, stop) idx) t
;
$$;
CREATE OR REPLACE FUNCTION
/*
* Generate a comma-separated list of primary key column names casted
* to the given type.
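 *
 * For example: red_utils.gen_pkey_columns_casted(1, 2, 'bigint')
 * returns 'primary_key_1::bigint, primary_key_2::bigint'.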
*/
red_utils.gen_pkey_columns_casted(start integer, stop integer, pkey_type text DEFAULT 'text')
RETURNS text
LANGUAGE 'sql'
IMMUTABLE
PARALLEL SAFE
AS $$
SELECT string_agg(t.item, ', ')
FROM (
SELECT format('%I::%s', 'primary_key_'||idx, pkey_type) AS item
FROM generate_series(start, stop) idx) t
;
$$;
CREATE OR REPLACE FUNCTION
/*
* Generate a primary key column definition list.
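 *
 * For example: red_utils.gen_pkey_column_definitions(1, 2) returns
 * 'primary_key_1 text, primary_key_2 text'.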
*/
red_utils.gen_pkey_column_definitions(
start integer, stop integer, column_type text DEFAULT 'text'
)
RETURNS text
LANGUAGE 'sql'
IMMUTABLE
PARALLEL SAFE
AS $$
SELECT string_agg(t.item, ', ')
FROM (
SELECT format('%I %s', 'primary_key_'||idx, column_type) AS item
FROM generate_series(start, stop) idx) t
;
$$;
CREATE OR REPLACE FUNCTION
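/*
 * Get the column type used for primary keys: 'text' for custom
 * groups, 'bigint' otherwise.
 */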
red_utils.get_pkey_type(is_custom boolean)
RETURNS TEXT
LANGUAGE 'sql'
IMMUTABLE
PARALLEL SAFE
AS $$
SELECT ('{bigint,text}'::text[])[is_custom::integer + 1];
$$;
DROP AGGREGATE IF EXISTS red_utils.jsonb_object_agg2(jsonb, VARIADIC text[]);
CREATE AGGREGATE
/*
* Like `jsonb_object_agg` but aggregates more than two columns into a
* single JSONB object.
*
* If possible, use `jsonb_object_agg` instead for performance
* reasons.
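 *
 * For example (illustrative): jsonb_object_agg2(json_data, primary_key_2,
 * primary_key_3) aggregates rows into {"<pk2>": {"<pk3>": <json_data>}}.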
*/
red_utils.jsonb_object_agg2(json_data jsonb, VARIADIC primary_keys text[]) (
SFUNC = red_utils.jsonb_set2,
STYPE = jsonb,
INITCOND = '{}',
PARALLEL = SAFE
)
;
CREATE TABLE IF NOT EXISTS
/*
* Table to keep track of other cogs' schemas.
*/
red_config.red_cogs(
cog_name text,
cog_id text,
schemaname text NOT NULL,
PRIMARY KEY (cog_name, cog_id)
)
;
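/*
 * Example usage (all values below are illustrative only):
 *
 *   -- Set a global value for a hypothetical cog:
 *   SELECT red_config.set(
 *       ROW('MyCog', '0123456789', 'GLOBAL',
 *           ARRAY['0'], ARRAY['api_key'], 1, FALSE)::red_config.identifier_data,
 *       '"s3cr3t"'::jsonb);
 *
 *   -- Read the same value back:
 *   SELECT red_config.get(
 *       ROW('MyCog', '0123456789', 'GLOBAL',
 *           ARRAY['0'], ARRAY['api_key'], 1, FALSE)::red_config.identifier_data);
 */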


@@ -0,0 +1,3 @@
SELECT red_config.delete_all_schemas();
DROP SCHEMA IF EXISTS red_config CASCADE;
DROP SCHEMA IF EXISTS red_utils CASCADE;


@@ -0,0 +1,255 @@
import getpass
import json
import sys
from pathlib import Path
from typing import Optional, Any, AsyncIterator, Tuple, Union, Callable, List
try:
# pylint: disable=import-error
import asyncpg
except ModuleNotFoundError:
asyncpg = None
from ... import data_manager, errors
from ..base import BaseDriver, IdentifierData, ConfigCategory
from ..log import log
__all__ = ["PostgresDriver"]
_PKG_PATH = Path(__file__).parent
DDL_SCRIPT_PATH = _PKG_PATH / "ddl.sql"
DROP_DDL_SCRIPT_PATH = _PKG_PATH / "drop_ddl.sql"
def encode_identifier_data(
id_data: IdentifierData
) -> Tuple[str, str, str, List[str], List[str], int, bool]:
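    # Map an IdentifierData object onto the red_config.identifier_data composite
    # type. The GLOBAL category has no real primary key, so the constant key "0"
    # (with a primary-key length of 1) stands in for it.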
return (
id_data.cog_name,
id_data.uuid,
id_data.category,
["0"] if id_data.category == ConfigCategory.GLOBAL else list(id_data.primary_key),
list(id_data.identifiers),
1 if id_data.category == ConfigCategory.GLOBAL else id_data.primary_key_len,
id_data.is_custom,
)
class PostgresDriver(BaseDriver):
_pool: Optional["asyncpg.pool.Pool"] = None
@classmethod
async def initialize(cls, **storage_details) -> None:
if asyncpg is None:
raise errors.MissingExtraRequirements(
"Red must be installed with the [postgres] extra to use the PostgreSQL driver"
)
cls._pool = await asyncpg.create_pool(**storage_details)
with DDL_SCRIPT_PATH.open() as fs:
await cls._pool.execute(fs.read())
@classmethod
async def teardown(cls) -> None:
if cls._pool is not None:
await cls._pool.close()
@staticmethod
def get_config_details():
        unixmsg = (
            ""
            if sys.platform == "win32"
            else (
                " - Common directories for PostgreSQL Unix-domain sockets (/run/postgresql, "
                "/var/run/postgresql, /var/pgsql_socket, /private/tmp, and /tmp),\n"
            )
        )
host = (
input(
f"Enter the PostgreSQL server's address.\n"
f"If left blank, Red will try the following, in order:\n"
f" - The PGHOST environment variable,\n{unixmsg}"
f" - localhost.\n"
f"> "
)
or None
)
print(
"Enter the PostgreSQL server port.\n"
"If left blank, this will default to either:\n"
" - The PGPORT environment variable,\n"
" - 5432."
)
while True:
port = input("> ") or None
if port is None:
break
try:
port = int(port)
except ValueError:
print("Port must be a number")
else:
break
user = (
input(
"Enter the PostgreSQL server username.\n"
"If left blank, this will default to either:\n"
" - The PGUSER environment variable,\n"
" - The OS name of the user running Red (ident/peer authentication).\n"
"> "
)
or None
)
        passfile = r"%APPDATA%\postgresql\pgpass.conf" if sys.platform == "win32" else "~/.pgpass"
password = getpass.getpass(
f"Enter the PostgreSQL server password. The input will be hidden.\n"
f" NOTE: If using ident/peer authentication (no password), enter NONE.\n"
f"When NONE is entered, this will default to:\n"
f" - The PGPASSWORD environment variable,\n"
f" - Looking up the password in the {passfile} passfile,\n"
f" - No password.\n"
f"> "
)
if password == "NONE":
password = None
database = (
input(
"Enter the PostgreSQL database's name.\n"
"If left blank, this will default to either:\n"
" - The PGDATABASE environment variable,\n"
" - The OS name of the user running Red.\n"
"> "
)
or None
)
return {
"host": host,
"port": port,
"user": user,
"password": password,
"database": database,
}
async def get(self, identifier_data: IdentifierData):
try:
result = await self._execute(
"SELECT red_config.get($1)",
encode_identifier_data(identifier_data),
method=self._pool.fetchval,
)
except asyncpg.UndefinedTableError:
raise KeyError from None
if result is None:
            # The result is None both when postgres yields no rows and when it yields a
            # NULL value. An actual JSON null would be returned as encoded JSON, i.e. the
            # string 'null'.
raise KeyError
return json.loads(result)
async def set(self, identifier_data: IdentifierData, value=None):
try:
await self._execute(
"SELECT red_config.set($1, $2::jsonb)",
encode_identifier_data(identifier_data),
json.dumps(value),
)
except asyncpg.ErrorInAssignmentError:
raise errors.CannotSetSubfield
async def clear(self, identifier_data: IdentifierData):
try:
await self._execute(
"SELECT red_config.clear($1)", encode_identifier_data(identifier_data)
)
except asyncpg.UndefinedTableError:
pass
async def inc(
self, identifier_data: IdentifierData, value: Union[int, float], default: Union[int, float]
) -> Union[int, float]:
try:
return await self._execute(
f"SELECT red_config.inc($1, $2, $3)",
encode_identifier_data(identifier_data),
value,
default,
method=self._pool.fetchval,
)
except asyncpg.WrongObjectTypeError as exc:
raise errors.StoredTypeError(*exc.args)
async def toggle(self, identifier_data: IdentifierData, default: bool) -> bool:
try:
return await self._execute(
"SELECT red_config.inc($1, $2)",
encode_identifier_data(identifier_data),
default,
method=self._pool.fetchval,
)
except asyncpg.WrongObjectTypeError as exc:
raise errors.StoredTypeError(*exc.args)
@classmethod
async def aiter_cogs(cls) -> AsyncIterator[Tuple[str, str]]:
query = "SELECT cog_name, cog_id FROM red_config.red_cogs"
log.invisible(query)
async with cls._pool.acquire() as conn, conn.transaction():
async for row in conn.cursor(query):
yield row["cog_name"], row["cog_id"]
@classmethod
async def delete_all_data(
cls, *, interactive: bool = False, drop_db: Optional[bool] = None, **kwargs
) -> None:
"""Delete all data being stored by this driver.
Parameters
----------
interactive : bool
Set to ``True`` to allow the method to ask the user for
input from the console, regarding the other unset parameters
for this method.
drop_db : Optional[bool]
Set to ``True`` to drop the entire database for the current
bot's instance. Otherwise, schemas within the database which
store bot data will be dropped, as well as functions,
aggregates, event triggers, and meta-tables.
"""
if interactive is True and drop_db is None:
print(
"Please choose from one of the following options:\n"
" 1. Drop the entire PostgreSQL database for this instance, or\n"
" 2. Delete all of Red's data within this database, without dropping the database "
"itself."
)
options = ("1", "2")
while True:
resp = input("> ")
try:
                    drop_db = not bool(options.index(resp))
except ValueError:
print("Please type a number corresponding to one of the options.")
else:
break
        if drop_db is True:
            storage_details = data_manager.storage_details()
            # DROP DATABASE is a utility command and cannot take bind parameters, so the
            # database name is quoted into the query as an identifier instead.
            db_name = storage_details["database"].replace('"', '""')
            await cls._pool.execute(f'DROP DATABASE "{db_name}"')
else:
with DROP_DDL_SCRIPT_PATH.open() as fs:
await cls._pool.execute(fs.read())
@classmethod
async def _execute(cls, query: str, *args, method: Optional[Callable] = None) -> Any:
if method is None:
method = cls._pool.execute
log.invisible("Query: %s", query)
if args:
log.invisible("Args: %s", args)
return await method(query, *args)