CREATE SCHEMA partman;
CREATE EXTENSION pg_partman SCHEMA partman;

CREATE ROLE partman WITH LOGIN;
GRANT ALL ON SCHEMA partman TO partman;
GRANT ALL ON ALL TABLES IN SCHEMA partman TO partman;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA partman TO partman;
GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA partman TO partman; -- PG11+ only
GRANT ALL ON SCHEMA my_partition_schema TO partman; -- schema that will hold the partition sets (placeholder name from the pg_partman docs)
GRANT TEMPORARY ON DATABASE sg TO partman; -- allow creation of temp tables to move data out of the default partition

-- Offline partitioning, following:
-- https://github.com/pgpartman/pg_partman/blob/master/doc/pg_partman_howto_native.md#offline-partitioning
ALTER TABLE lsif_data_docs_search_public RENAME TO old_lsif_data_docs_search_public;

-- Create our new partitioned table, identical to the old one but declared with
-- PARTITION BY RANGE (id). This is the actual search index over API docs, one
-- entry per symbol/section of API docs.
CREATE TABLE lsif_data_docs_search_public (
    LIKE old_lsif_data_docs_search_public INCLUDING ALL
) PARTITION BY RANGE (id);

-- Drop the triggers on our old table; we do not want them updating our aggregate
-- count tables as we move data.
DROP TRIGGER lsif_data_docs_search_public_delete ON old_lsif_data_docs_search_public;
DROP TRIGGER lsif_data_docs_search_public_insert ON old_lsif_data_docs_search_public;

-- Create partitions with 30 million rows each, roughly enough to store 1,200 Go
-- repos' worth of results.
SELECT partman.create_parent('public.lsif_data_docs_search_public', 'id', 'native', '30000000');

-- Actually perform the partitioning.
--
-- p_interval: move rows in batches of 500,000 at a time
-- p_batch: maximum number of batches to run (set high enough to move everything)
CALL partman.partition_data_proc(
    'public.lsif_data_docs_search_public',
    p_interval := '500000',
    p_batch := 9999999,
    p_source_table := 'public.old_lsif_data_docs_search_public'
);

VACUUM ANALYZE public.lsif_data_docs_search_public;
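
-- Sanity checks after the move (a sketch; these queries are not part of the
-- pg_partman howto). partition_data_proc moves rows out of the source table as
-- it copies them, so the old table should now be empty, every row should live
-- in a child partition, and nothing should be stranded in the default partition.

-- List the child partitions partman created.
SELECT inhrelid::regclass AS partition
FROM pg_inherits
WHERE inhparent = 'public.lsif_data_docs_search_public'::regclass
ORDER BY 1;

-- Rows remaining in the drained source table; expect 0.
SELECT count(*) FROM public.old_lsif_data_docs_search_public;

-- pg_partman's check_default() reports default partitions that still contain
-- rows; expect no results.
SELECT * FROM partman.check_default();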
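
-- Going forward, pg_partman only premakes a handful of partitions ahead of the
-- current max id (premake defaults to 4), so maintenance must run periodically
-- to keep creating new id-range partitions, e.g. from pg_cron or an external
-- scheduler (assumption: no scheduler is configured by this script).
SELECT partman.run_maintenance('public.lsif_data_docs_search_public');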