diff --git a/.idea/runConfigurations/Check.xml b/.idea/runConfigurations/Check.xml
index b8c0970..9faa42d 100644
--- a/.idea/runConfigurations/Check.xml
+++ b/.idea/runConfigurations/Check.xml
@@ -1,7 +1,7 @@
-
+
\ No newline at end of file
diff --git a/.idea/runConfigurations/tests.xml b/.idea/runConfigurations/tests.xml
index d7fb66f..ceb308e 100644
--- a/.idea/runConfigurations/tests.xml
+++ b/.idea/runConfigurations/tests.xml
@@ -1,6 +1,6 @@
-
+
@@ -13,7 +13,7 @@
-
+
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..844f018
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,25 @@
+# To execute this docker-compose yml file use docker-compose -f docker-compose.yml up
+# Add the "-d" flag at the end for detached execution
+version: '3.7'
+services:
+ db:
+ container_name: postgres_json
+ build:
+ context: docker/postgresql
+ restart: always
+ ports:
+ - 5555:5432
+ environment:
+ POSTGRES_DB: json_test
+ POSTGRES_USER: test
+ POSTGRES_PASSWORD: test
+
+ pgadmin:
+ container_name: pgadmin4_json
+ image: dpage/pgadmin4
+ restart: always
+ ports:
+ - 8585:80
+ environment:
+ PGADMIN_DEFAULT_EMAIL: rusk23@gmail.com
+ PGADMIN_DEFAULT_PASSWORD: azerty
\ No newline at end of file
diff --git a/docker/postgresql/Dockerfile b/docker/postgresql/Dockerfile
new file mode 100644
index 0000000..f315eb7
--- /dev/null
+++ b/docker/postgresql/Dockerfile
@@ -0,0 +1,9 @@
+FROM postgres:11
+
+COPY postgresql.conf /tmp/postgresql.conf
+COPY extension.sh /docker-entrypoint-initdb.d/000-extension.sh
+COPY setup.sh /docker-entrypoint-initdb.d/100-setup.sh
+
+ENTRYPOINT ["docker-entrypoint.sh"]
+EXPOSE 5432
+CMD ["postgres"]
diff --git a/docker/postgresql/extension.sh b/docker/postgresql/extension.sh
new file mode 100644
index 0000000..290d0af
--- /dev/null
+++ b/docker/postgresql/extension.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+set -e
+
+psql -v ON_ERROR_STOP=1 --username "test" --dbname "json_test" <<-EOSQL
+create extension if not exists plpgsql;
+create extension if not exists "uuid-ossp";
+EOSQL
diff --git a/docker/postgresql/postgresql.conf b/docker/postgresql/postgresql.conf
new file mode 100644
index 0000000..fd24ea8
--- /dev/null
+++ b/docker/postgresql/postgresql.conf
@@ -0,0 +1,668 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+# name = value
+#
+# (The "=" is optional.) Whitespace may be used. Comments are introduced with
+# "#" anywhere on a line. The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal. If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()". Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on". Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units: kB = kilobytes Time units: ms = milliseconds
+# MB = megabytes s = seconds
+# GB = gigabytes min = minutes
+# TB = terabytes h = hours
+# d = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir' # use data in another directory
+ # (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
+ # (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
+ # (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = '' # write an extra PID file
+ # (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+ # comma-separated list of addresses;
+ # defaults to 'localhost'; use '*' for all
+ # (change requires restart)
+#port = 5432 # (change requires restart)
+#max_connections = 100 # (change requires restart)
+#superuser_reserved_connections = 3 # (change requires restart)
+#unix_socket_directories = '/tmp' # comma-separated list of directories
+ # (change requires restart)
+#unix_socket_group = '' # (change requires restart)
+#unix_socket_permissions = 0777 # begin with 0 to use octal notation
+ # (change requires restart)
+#bonjour = off # advertise server via Bonjour
+ # (change requires restart)
+#bonjour_name = '' # defaults to the computer name
+ # (change requires restart)
+
+# - Security and Authentication -
+
+#authentication_timeout = 1min # 1s-600s
+#ssl = off
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
+#ssl_prefer_server_ciphers = on
+#ssl_ecdh_curve = 'prime256v1'
+#ssl_dh_params_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_key_file = 'server.key'
+#ssl_ca_file = ''
+#ssl_crl_file = ''
+#password_encryption = md5 # md5 or scram-sha-256
+#db_user_namespace = off
+#row_security = on
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = ''
+#krb_caseins_users = off
+
+# - TCP Keepalives -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
+ # 0 selects the system default
+#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
+ # 0 selects the system default
+#tcp_keepalives_count = 0 # TCP_KEEPCNT;
+ # 0 selects the system default
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 1GB # min 128kB
+ # (change requires restart)
+#huge_pages = try # on, off, or try
+ # (change requires restart)
+#temp_buffers = 8MB # min 800kB
+#max_prepared_transactions = 0 # zero disables the feature
+ # (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+work_mem = 256MB # min 64kB
+#maintenance_work_mem = 64MB # min 1MB
+#replacement_sort_tuples = 150000 # limits use of replacement selection sort
+#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
+#max_stack_depth = 2MB # min 100kB
+#dynamic_shared_memory_type = posix # the default is the first option
+ # supported by the operating system:
+ # posix
+ # sysv
+ # windows
+ # mmap
+ # use none to disable dynamic shared memory
+ # (change requires restart)
+
+# - Disk -
+
+#temp_file_limit = -1 # limits per-process temp file space
+ # in kB, or -1 for no limit
+
+# - Kernel Resource Usage -
+
+#max_files_per_process = 1000 # min 25
+ # (change requires restart)
+shared_preload_libraries = 'pg_stat_statements'
+pg_stat_statements.max = 10000
+pg_stat_statements.track = all
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0 # 0-100 milliseconds
+#vacuum_cost_page_hit = 1 # 0-10000 credits
+#vacuum_cost_page_miss = 10 # 0-10000 credits
+#vacuum_cost_page_dirty = 20 # 0-10000 credits
+#vacuum_cost_limit = 200 # 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms # 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
+#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 0 # measured in pages, 0 disables
+
+# - Asynchronous Behavior -
+
+effective_io_concurrency = 100 # 1-1000; 0 disables prefetching
+#max_worker_processes = 8 # (change requires restart)
+max_parallel_workers_per_gather = 4 # taken from max_parallel_workers
+#max_parallel_workers = 8 # maximum number of max_worker_processes that
+ # can be used in parallel queries
+#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
+ # (change requires restart)
+#backend_flush_after = 0 # measured in pages, 0 disables
+
+
+#------------------------------------------------------------------------------
+# WRITE AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica # minimal, replica, or logical
+ # (change requires restart)
+#fsync = on # flush data to disk for crash safety
+ # (turning this off can cause
+ # unrecoverable data corruption)
+#synchronous_commit = on # synchronization level;
+ # off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync # the default is the first option
+ # supported by the operating system:
+ # open_datasync
+ # fdatasync (default on Linux)
+ # fsync
+ # fsync_writethrough
+ # open_sync
+#full_page_writes = on # recover from partial page writes
+#wal_compression = off # enable compression of full-page writes
+#wal_log_hints = off # also do full page writes of non-critical updates
+ # (change requires restart)
+#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
+ # (change requires restart)
+#wal_writer_delay = 200ms # 1-10000 milliseconds
+#wal_writer_flush_after = 1MB # measured in pages, 0 disables
+
+#commit_delay = 0 # range 0-100000, in microseconds
+#commit_siblings = 5 # range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min # range 30s-1d
+#max_wal_size = 1GB
+#min_wal_size = 80MB
+#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 0 # measured in pages, 0 disables
+#checkpoint_warning = 30s # 0 disables
+
+# - Archiving -
+
+#archive_mode = off # enables archiving; off, on, or always
+ # (change requires restart)
+#archive_command = '' # command to use to archive a logfile segment
+ # placeholders: %p = path of file to archive
+ # %f = file name only
+ # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0 # force a logfile segment switch after this
+ # number of seconds; 0 disables
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Server(s) -
+
+# Set these on the master and on any standby that will send replication data.
+
+#max_wal_senders = 10 # max number of walsender processes
+ # (change requires restart)
+#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
+#wal_sender_timeout = 60s # in milliseconds; 0 disables
+
+#max_replication_slots = 10 # max number of replication slots
+ # (change requires restart)
+#track_commit_timestamp = off # collect timestamp of transaction commit
+ # (change requires restart)
+
+# - Master Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = '' # standby servers that provide sync rep
+ # method to choose sync standbys, number of sync standbys,
+ # and comma-separated list of application_name
+ # from standby(s); '*' = all
+#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
+
+# - Standby Servers -
+
+# These settings are ignored on a master server.
+
+#hot_standby = on # "off" disallows queries during recovery
+ # (change requires restart)
+#max_standby_archive_delay = 30s # max delay before canceling queries
+ # when reading WAL from archive;
+ # -1 allows indefinite delay
+#max_standby_streaming_delay = 30s # max delay before canceling queries
+ # when reading streaming WAL;
+ # -1 allows indefinite delay
+#wal_receiver_status_interval = 10s # send replies at least this often
+ # 0 disables
+#hot_standby_feedback = off # send info from standby to prevent
+ # query conflicts
+#wal_receiver_timeout = 60s # time that receiver waits for
+ # communication from master
+ # in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s # time to wait before retrying to
+ # retrieve WAL after a failed attempt
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_logical_replication_workers = 4 # taken from max_worker_processes
+ # (change requires restart)
+#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0 # measured on an arbitrary scale
+#random_page_cost = 4.0 # same scale as above
+#cpu_tuple_cost = 0.01 # same scale as above
+#cpu_index_tuple_cost = 0.005 # same scale as above
+#cpu_operator_cost = 0.0025 # same scale as above
+#parallel_tuple_cost = 0.1 # same scale as above
+#parallel_setup_cost = 1000.0 # same scale as above
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5 # range 1-10
+#geqo_pool_size = 0 # selects default based on effort
+#geqo_generations = 0 # selects default based on effort
+#geqo_selection_bias = 2.0 # range 1.5-2.0
+#geqo_seed = 0.0 # range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100 # range 1-10000
+#constraint_exclusion = partition # on, off, or partition
+#cursor_tuple_fraction = 0.1 # range 0.0-1.0
+#from_collapse_limit = 8
+#join_collapse_limit = 8 # 1 disables collapsing of explicit
+ # JOIN clauses
+#force_parallel_mode = off
+
+
+#------------------------------------------------------------------------------
+# ERROR REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr' # Valid values are combinations of
+ # stderr, csvlog, syslog, and eventlog,
+ # depending on platform. csvlog
+ # requires logging_collector to be on.
+
+# This is used when logging to stderr:
+logging_collector = on
+ # Enable capturing of stderr and csvlog
+ # into log files. Required to be on for
+ # csvlogs.
+ # (change requires restart)
+
+# These are only used if logging_collector is on:
+log_directory = '/var/log/postgresql'
+ # directory where log files are written,
+ # can be absolute or relative to PGDATA
+log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
+ # log file name pattern,
+ # can include strftime() escapes
+#log_file_mode = 0600 # creation mode for log files,
+ # begin with 0 to use octal notation
+log_file_mode = 0660
+#log_truncate_on_rotation = off # If on, an existing log file with the
+ # same name as the new log file will be
+ # truncated rather than appended to.
+ # But such truncation only occurs on
+ # time-driven rotation, not on restarts
+ # or size-driven rotation. Default is
+ # off, meaning append to existing files
+ # in all cases.
+log_rotation_age = 1d
+ # Automatic rotation of logfiles will
+ # happen after that time. 0 disables.
+log_rotation_size = 100MB
+ # Automatic rotation of logfiles will
+ # happen after that much log output.
+ # 0 disables.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (win32):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#client_min_messages = notice # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # log
+ # notice
+ # warning
+ # error
+
+#log_min_messages = warning # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # info
+ # notice
+ # warning
+ # error
+ # log
+ # fatal
+ # panic
+
+#log_min_error_statement = error # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # info
+ # notice
+ # warning
+ # error
+ # log
+ # fatal
+ # panic (effectively off)
+
+#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
+ # and their durations, > 0 logs only
+ # statements running at least this number
+ # of milliseconds
+
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_error_verbosity = default # terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] ' # special values:
+ # %a = application name
+ # %u = user name
+ # %d = database name
+ # %r = remote host and port
+ # %h = remote host
+ # %p = process ID
+ # %t = timestamp without milliseconds
+ # %m = timestamp with milliseconds
+ # %n = timestamp with milliseconds (as a Unix epoch)
+ # %i = command tag
+ # %e = SQL state
+ # %c = session ID
+ # %l = session line number
+ # %s = session start timestamp
+ # %v = virtual transaction ID
+ # %x = transaction ID (0 if none)
+ # %q = stop here in non-session
+ # processes
+ # %% = '%'
+ # e.g. '<%u%%%d> '
+#log_lock_waits = off # log lock waits >= deadlock_timeout
+#log_statement = 'none' # none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1 # log temporary files equal or larger
+ # than the specified size in kilobytes;
+ # -1 disables, 0 logs all temp files
+#log_timezone = 'GMT'
+
+
+# - Process Title -
+
+#cluster_name = '' # added to process titles if nonempty
+ # (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# RUNTIME STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query/Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#track_io_timing = off
+#track_functions = none # none, pl, all
+#track_activity_query_size = 1024 # (change requires restart)
+#stats_temp_directory = 'pg_stat_tmp'
+
+
+# - Statistics Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM PARAMETERS
+#------------------------------------------------------------------------------
+
+#autovacuum = on # Enable autovacuum subprocess? 'on'
+ # requires track_counts to also be on.
+#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
+ # their durations, > 0 logs only
+ # actions running at least this number
+ # of milliseconds.
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
+ # (change requires restart)
+#autovacuum_naptime = 1min # time between autovacuum runs
+#autovacuum_vacuum_threshold = 50 # min number of row updates before
+ # vacuum
+#autovacuum_analyze_threshold = 50 # min number of row updates before
+ # analyze
+#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
+ # (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
+ # before forced vacuum
+ # (change requires restart)
+#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
+ # autovacuum, in milliseconds;
+ # -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
+ # autovacuum, -1 means use
+ # vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#search_path = '"$user", public' # schema names
+#default_tablespace = '' # a tablespace name, '' uses the default
+#temp_tablespaces = '' # a list of tablespace names, '' uses
+ # only default tablespace
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0 # in milliseconds, 0 is disabled
+#lock_timeout = 0 # in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
+#vacuum_freeze_min_age = 50000000
+#vacuum_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_freeze_table_age = 150000000
+#bytea_output = 'hex' # hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_fuzzy_search_limit = 0
+#gin_pending_list_limit = 4MB
+
+# - Locale and Formatting -
+
+#datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+#timezone = 'GMT'
+#timezone_abbreviations = 'Default' # Select the set of available time zone
+ # abbreviations. Currently, there are
+ # Default
+ # Australia (historical usage)
+ # India
+ # You can create your own file in
+ # share/timezonesets/.
+#extra_float_digits = 0 # min -15, max 3
+#client_encoding = sql_ascii # actually, defaults to database
+ # encoding
+
+# These settings are initialized by initdb, but they can be changed.
+#lc_messages = 'C' # locale for system error message
+ # strings
+#lc_monetary = 'C' # locale for monetary formatting
+#lc_numeric = 'C' # locale for number formatting
+#lc_time = 'C' # locale for time formatting
+
+# default configuration for text search
+#default_text_search_config = 'pg_catalog.simple'
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64 # min 10
+ # (change requires restart)
+#max_pred_locks_per_transaction = 64 # min 10
+ # (change requires restart)
+#max_pred_locks_per_relation = -2 # negative values mean
+ # (max_pred_locks_per_transaction
+ # / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2 # min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION/PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding # on, off, or safe_encoding
+#default_with_oids = off
+#escape_string_warning = on
+#lo_compat_privileges = off
+#operator_precedence_warning = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off # terminate session on any error?
+#restart_after_crash = on # reinitialize after backend crash?
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf.
+
+#include_dir = 'conf.d' # include files ending in '.conf' from
+ # directory 'conf.d'
+#include_if_exists = 'exists.conf' # include file only if it exists
+#include = 'special.conf' # include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
+
+zdb.default_elasticsearch_url = 'http://elasticsearch:9200/'
diff --git a/docker/postgresql/setup.sh b/docker/postgresql/setup.sh
new file mode 100644
index 0000000..db8672a
--- /dev/null
+++ b/docker/postgresql/setup.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+set -e
+
+cat /tmp/postgresql.conf > /var/lib/postgresql/data/postgresql.conf
\ No newline at end of file
diff --git a/src/main/kotlin/fr/postgresjson/connexion/Requester.kt b/src/main/kotlin/fr/postgresjson/connexion/Requester.kt
index 3d44790..d5ff270 100644
--- a/src/main/kotlin/fr/postgresjson/connexion/Requester.kt
+++ b/src/main/kotlin/fr/postgresjson/connexion/Requester.kt
@@ -1,7 +1,10 @@
package fr.postgresjson.connexion
-import java.io.File
+import fr.postgresjson.utils.searchSqlFiles
+import java.net.URI
import fr.postgresjson.definition.Function as DefinitionFunction
+import fr.postgresjson.definition.Function as FunctionDefinition
+import fr.postgresjson.definition.Query as QueryDefinition
class Requester(
private val connection: Connection,
@@ -13,17 +16,19 @@ class Requester(
return this
}
+ fun addQuery(query: QueryDefinition): Requester = addQuery(query.name, query.script)
+
fun addQuery(name: String, sql: String): Requester {
addQuery(Query(name, sql, connection))
return this
}
- fun addQuery(queriesDirectory: File): Requester {
- queriesDirectory.walk()
- .filter { it.isFile && it.extension == "sql" }
+ fun addQuery(queriesDirectory: URI): Requester {
+ queriesDirectory.searchSqlFiles()
.forEach {
- val path = it.parentFile.nameWithoutExtension
- addQuery("$path/${it.nameWithoutExtension}", it.readText())
+ if (it is QueryDefinition) {
+ addQuery(it)
+ }
}
return this
}
@@ -44,11 +49,12 @@ class Requester(
return this
}
- fun addFunction(functionsDirectory: File): Requester {
- functionsDirectory.walk()
- .filter { it.isFile && it.extension == "sql" }
+ fun addFunction(functionsDirectory: URI): Requester {
+ functionsDirectory.searchSqlFiles()
.forEach {
- addFunction(it.readText())
+ if (it is FunctionDefinition) {
+ addFunction(it)
+ }
}
return this
}
@@ -69,8 +75,8 @@ class Requester(
class RequesterFactory(
private val connection: Connection,
- private val queriesDirectory: File? = null,
- private val functionsDirectory: File? = null
+ private val queriesDirectory: URI? = null,
+ private val functionsDirectory: URI? = null
) {
constructor(
host: String = "localhost",
@@ -78,8 +84,8 @@ class Requester(
database: String = "dc-project",
username: String = "dc-project",
password: String = "dc-project",
- queriesDirectory: File? = null,
- functionsDirectory: File? = null
+ queriesDirectory: URI? = null,
+ functionsDirectory: URI? = null
) : this(
Connection(host = host, port = port, database = database, username = username, password = password),
queriesDirectory,
diff --git a/src/main/kotlin/fr/postgresjson/definition/Function.kt b/src/main/kotlin/fr/postgresjson/definition/Function.kt
index a1ab399..6c5d53f 100644
--- a/src/main/kotlin/fr/postgresjson/definition/Function.kt
+++ b/src/main/kotlin/fr/postgresjson/definition/Function.kt
@@ -1,14 +1,15 @@
package fr.postgresjson.definition
import java.io.File
+import java.nio.file.Path
-open class Function(
- override val script: String
+class Function(
+ override val script: String,
+ override var source: Path? = null
) : Resource, ParametersInterface {
val returns: String
override val name: String
    override val parameters: List<Parameter>
- override var source: File? = null
init {
val functionRegex =
@@ -46,8 +47,7 @@ open class Function(
}
}
- abstract class ParseException(message: String, cause: Throwable? = null) : Exception(message, cause)
- class FunctionNotFound(cause: Throwable? = null) : ParseException("Function not found in script", cause)
+ class FunctionNotFound(cause: Throwable? = null) : Resource.ParseException("Function not found in script", cause)
fun getDefinition(): String {
return parameters
diff --git a/src/main/kotlin/fr/postgresjson/definition/Migration.kt b/src/main/kotlin/fr/postgresjson/definition/Migration.kt
new file mode 100644
index 0000000..d6fc199
--- /dev/null
+++ b/src/main/kotlin/fr/postgresjson/definition/Migration.kt
@@ -0,0 +1,35 @@
+package fr.postgresjson.definition
+
+import java.nio.file.Path
+
+class Migration(
+ override val script: String,
+ source: Path
+) : Resource {
+ override val name: String
+ val direction: Direction
+ override var source: Path? = null
+
+ init {
+ this.source = source
+ this.direction = source.fileName.toString()
+ .let {
+ when {
+ it.endsWith(".down.sql") -> Direction.DOWN
+ it.endsWith(".up.sql") -> Direction.UP
+ else -> throw MigrationNotFound()
+ }
+ }
+ this.name = source.fileName.toString()
+ .substringAfterLast("/")
+ .let {
+ when (direction) {
+ Direction.DOWN -> it.substringBefore(".down.sql")
+ Direction.UP -> it.substringBefore(".up.sql")
+ }
+ }
+ }
+
+ class MigrationNotFound(cause: Throwable? = null) : Resource.ParseException("Migration not found in script", cause)
+ enum class Direction { UP, DOWN }
+}
\ No newline at end of file
diff --git a/src/main/kotlin/fr/postgresjson/definition/Query.kt b/src/main/kotlin/fr/postgresjson/definition/Query.kt
new file mode 100644
index 0000000..f16058e
--- /dev/null
+++ b/src/main/kotlin/fr/postgresjson/definition/Query.kt
@@ -0,0 +1,25 @@
+package fr.postgresjson.definition
+
+import java.nio.file.Path
+
+class Query(
+ override val script: String,
+ source: Path
+) : Resource {
+ override var source: Path? = source
+ override val name: String = getNameFromComment(script) ?: getNameFromFile(source)
+
+ /** Try to get name from comment in file */
+ private fun getNameFromComment(script: String): String? =
+ """-- *name ?: ?(?[^ \n]+)"""
+ .toRegex(setOf(RegexOption.IGNORE_CASE, RegexOption.MULTILINE))
+ .find(script)?.let {
+ it.groups["name"]?.value?.trim()
+ }
+
+ /** Try to get name from the filename */
+ private fun getNameFromFile(source: Path): String = source
+ .fileName.toString()
+ .substringAfterLast("/")
+ .substringBeforeLast(".sql")
+}
\ No newline at end of file
diff --git a/src/main/kotlin/fr/postgresjson/definition/Resource.kt b/src/main/kotlin/fr/postgresjson/definition/Resource.kt
index 61cc097..7f32336 100644
--- a/src/main/kotlin/fr/postgresjson/definition/Resource.kt
+++ b/src/main/kotlin/fr/postgresjson/definition/Resource.kt
@@ -1,11 +1,38 @@
package fr.postgresjson.definition
import java.io.File
+import java.net.URL
+import java.nio.file.Path
interface Resource {
val name: String
val script: String
- var source: File?
+ var source: Path?
+
+ open class ParseException(message: String, cause: Throwable? = null) : Exception(message, cause)
+
+ companion object {
+ fun build(file: File): Resource =
+ build(file.readText(), Path.of(file.toURI()))
+
+ fun build(url: URL): Resource =
+ build(url.readText(), Path.of(url.toURI()))
+
+ fun build(resource: String, path: Path): Resource =
+ try {
+ Function(resource, path)
+ } catch (e: ParseException) {
+ try {
+ Migration(resource, path)
+ } catch (e: ParseException) {
+ try {
+ Query(resource, path)
+ } catch (e: ParseException) {
+ throw ParseException("No SQL resource found")
+ }
+ }
+ }
+ }
}
interface ResourceCollection {
diff --git a/src/main/kotlin/fr/postgresjson/migration/Migrations.kt b/src/main/kotlin/fr/postgresjson/migration/Migrations.kt
index 54c3c5e..a5323be 100644
--- a/src/main/kotlin/fr/postgresjson/migration/Migrations.kt
+++ b/src/main/kotlin/fr/postgresjson/migration/Migrations.kt
@@ -2,14 +2,15 @@ package fr.postgresjson.migration
import com.fasterxml.jackson.core.type.TypeReference
import fr.postgresjson.connexion.Connection
-import fr.postgresjson.definition.Function.FunctionNotFound
+import fr.postgresjson.definition.Migration as DefinitionMigration
import fr.postgresjson.entity.mutable.Entity
import fr.postgresjson.migration.Migration.Action
import fr.postgresjson.migration.Migration.Status
import fr.postgresjson.utils.LoggerDelegate
+import fr.postgresjson.utils.searchSqlFiles
import org.slf4j.Logger
-import java.io.File
import java.io.FileNotFoundException
+import java.net.URI
import java.util.*
import fr.postgresjson.definition.Function as DefinitionFunction
@@ -35,27 +36,27 @@ interface Migration {
data class Migrations private constructor(
private val connection: Connection,
- private val queries: MutableMap<String, Query> = mutableMapOf(),
+ private val migrationsScripts: MutableMap<String, Query> = mutableMapOf(),
private val functions: MutableMap<String, Function> = mutableMapOf()
) {
- private var directories: List<File> = emptyList()
+ private var directories: List<URI> = emptyList()
private val logger: Logger? by LoggerDelegate()
- constructor(directory: File, connection: Connection) : this(listOf(directory), connection)
+ constructor(directory: URI, connection: Connection) : this(listOf(directory), connection)
- constructor(directories: List<File>, connection: Connection) : this(connection) {
+ constructor(directories: List<URI>, connection: Connection) : this(connection) {
initDB()
this.directories = directories
reset()
}
fun reset() {
- queries.clear()
+ migrationsScripts.clear()
functions.clear()
getMigrationFromDB()
getMigrationFromDirectory(directories)
- queries.forEach { (_, query) ->
+ migrationsScripts.forEach { (_, query) ->
if (query.doExecute === null) {
query.doExecute = Action.DOWN
}
@@ -84,7 +85,7 @@ data class Migrations private constructor(
this::class.java.classLoader.getResource("sql/migration/findAllHistory.sql")!!.readText().let {
connection.select(it, object : TypeReference>() {})
.map { query ->
- queries[query.filename] = Query(query.filename, query.up, query.down, connection, query.executedAt)
+ migrationsScripts[query.filename] = Query(query.filename, query.up, query.down, connection, query.executedAt)
}
}
}
@@ -92,7 +93,7 @@ data class Migrations private constructor(
/**
* Get all migration from multiples Directories
*/
- private fun getMigrationFromDirectory(directory: List<File>) {
+ private fun getMigrationFromDirectory(directory: List<URI>) {
directory.forEach {
getMigrationFromDirectory(it)
}
@@ -101,29 +102,26 @@ data class Migrations private constructor(
/**
* Get all migration from Directory
*/
- private fun getMigrationFromDirectory(directory: File) {
- directory.walk().filter {
- it.isFile
- }.forEach { file ->
- if (file.name.endsWith(".up.sql")) {
- file.path.substring(0, file.path.length - 7).let {
- try {
- val down = File("$it.down.sql").readText()
- val up = file.readText()
- val name = file.name.substring(0, file.name.length - 7)
- addQuery(name, up, down)
- } catch (e: FileNotFoundException) {
- throw DownMigrationNotDefined("$it.down.sql", e)
- }
+ private fun getMigrationFromDirectory(directory: URI) {
+ val downs: MutableMap<String, DefinitionMigration> = mutableMapOf()
+
+ /* Set Down Migration */
+ directory.searchSqlFiles().apply {
+ forEach { migration ->
+ if (migration is DefinitionMigration && migration.direction == DefinitionMigration.Direction.DOWN) {
+ downs += migration.name to migration
}
- } else if (file.name.endsWith(".down.sql")) {
- // Nothing
- } else {
- val fileContent = file.readText()
- try {
- addFunction(fileContent)
- } catch (e: FunctionNotFound) {
- // Nothing
+ }
+
+ /* Set up migrations and functions */
+ forEach { migration ->
+ if (migration is DefinitionMigration && migration.direction == DefinitionMigration.Direction.UP) {
+ val down = downs[migration.name] ?: throw DownMigrationNotDefined(migration.name + ".down.sql")
+ downs -= migration.name
+
+ addQuery(migration, down)
+ } else if (migration is DefinitionFunction) {
+ addFunction(migration)
}
}
}
@@ -131,7 +129,7 @@ data class Migrations private constructor(
enum class Direction { UP, DOWN }
- internal class DownMigrationNotDefined(path: String, cause: FileNotFoundException) :
+ internal class DownMigrationNotDefined(path: String, cause: FileNotFoundException? = null) :
Throwable("The file $path whas not found", cause)
fun addFunction(newDefinition: DefinitionFunction, callback: (Function) -> Unit = {}): Migrations {
@@ -155,18 +153,21 @@ data class Migrations private constructor(
return this
}
+ fun addQuery(up: DefinitionMigration, down: DefinitionMigration, callback: (Query) -> Unit = {}): Migrations =
+ addQuery(up.name, up.script, down.script, callback)
+
fun addQuery(name: String, up: String, down: String, callback: (Query) -> Unit = {}): Migrations {
- if (queries[name] === null) {
- queries[name] = Query(name, up, down, connection).apply {
+ if (migrationsScripts[name] === null) {
+ migrationsScripts[name] = Query(name, up, down, connection).apply {
doExecute = Action.UP
}
} else {
- queries[name]!!.apply {
+ migrationsScripts[name]!!.apply {
doExecute = Action.OK
}
}
- callback(queries[name]!!)
+ callback(migrationsScripts[name]!!)
return this
}
@@ -191,7 +192,7 @@ data class Migrations private constructor(
internal fun up(): Map<String, Status> {
val list: MutableMap<String, Status> = mutableMapOf()
- queries.forEach {
+ migrationsScripts.forEach {
it.value.let { query ->
if (query.doExecute == Action.UP) {
query.up().let { status ->
@@ -216,7 +217,7 @@ data class Migrations private constructor(
internal fun down(force: Boolean = false): Map<String, Status> {
val list: MutableMap<String, Status> = mutableMapOf()
- queries.forEach {
+ migrationsScripts.forEach {
it.value.let { query ->
if (query.doExecute == Action.DOWN || force) {
query.down().let { status ->
@@ -297,7 +298,7 @@ data class Migrations private constructor(
}
fun copy(): Migrations {
- val queriesCopy = queries.map {
+ val queriesCopy = migrationsScripts.map {
it.key to it.value.copy()
}.toMap().toMutableMap()
diff --git a/src/main/kotlin/fr/postgresjson/utils/searchSqlFiles.kt b/src/main/kotlin/fr/postgresjson/utils/searchSqlFiles.kt
new file mode 100644
index 0000000..d95b23b
--- /dev/null
+++ b/src/main/kotlin/fr/postgresjson/utils/searchSqlFiles.kt
@@ -0,0 +1,52 @@
+package fr.postgresjson.utils
+
+import fr.postgresjson.definition.Resource
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
+import java.net.URI
+import java.net.URL
+import java.nio.file.FileSystems
+import java.nio.file.FileVisitOption
+import java.nio.file.Files
+import java.nio.file.Path
+import kotlin.streams.asSequence
+
+/** Search every `.sql` file reachable from this URL and build a [Resource] for each one found. */
+fun URL.searchSqlFiles() = this.toURI().searchSqlFiles()
+
+/**
+ * Lazily walk the directory (or jar entry) behind this URI, up to 5 levels deep,
+ * and yield a [Resource] for every file whose name ends with `.sql`.
+ */
+fun URI.searchSqlFiles() = sequence {
+    val logger: Logger = LoggerFactory.getLogger("sqlFilesSearch")
+    val uri: URI = this@searchSqlFiles
+    if (uri.scheme == "jar") {
+        // Entries inside a jar cannot be walked directly: mount a FileSystem on the archive first.
+        val relativePath = uri.toString().substringAfter('!')
+        FileSystems
+            .newFileSystem(uri, emptyMap<String, Any>())
+            .getPath(relativePath)
+            .walk(5)
+            .asSequence()
+            .filter { it.fileName.toString().endsWith(".sql") }
+            .map { this::class.java.getResource(it.toString()) }
+            .forEach {
+                logger.debug(it.toString())
+                yield(Resource.build(it))
+            }
+    } else {
+        uri
+            .walk(5)
+            .asSequence()
+            .map { it.toFile() }
+            .filter { it.isFile && it.extension == "sql" }
+            .forEach {
+                logger.debug(it.toString())
+                yield(Resource.build(it))
+            }
+    }
+}
+
+private fun Path.walk(maxDepth: Int = Int.MAX_VALUE, vararg options: FileVisitOption) = Files.walk(this, maxDepth, *options)
+private fun URI.walk(maxDepth: Int = Int.MAX_VALUE, vararg options: FileVisitOption) = Files.walk(Path.of(this), maxDepth, *options)
diff --git a/src/test/kotlin/fr/postgresjson/MigrationTest.kt b/src/test/kotlin/fr/postgresjson/MigrationTest.kt
index a531a63..5645a97 100644
--- a/src/test/kotlin/fr/postgresjson/MigrationTest.kt
+++ b/src/test/kotlin/fr/postgresjson/MigrationTest.kt
@@ -10,13 +10,12 @@ import org.amshove.kluent.shouldThrow
import org.junit.jupiter.api.Assertions
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.TestInstance
-import java.io.File
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class MigrationTest() : TestAbstract() {
@Test
fun `run up query`() {
- val resources = File(this::class.java.getResource("/sql/migrations").toURI())
+ val resources = this::class.java.getResource("/sql/migrations").toURI()
val m = Migrations(resources, connection)
m.up().apply {
this `should contain` Pair("1", Migration.Status.OK)
@@ -28,7 +27,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `migration up Query should throw error if no down`() {
- val resources = File(this::class.java.getResource("/sql/migration_without_down").toURI())
+ val resources = this::class.java.getResource("/sql/migration_without_down").toURI()
invoking {
Migrations(resources, connection)
} shouldThrow Migrations.DownMigrationNotDefined::class
@@ -36,7 +35,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run forced down query`() {
- val resources = File(this::class.java.getResource("/sql/migrations").toURI())
+ val resources = this::class.java.getResource("/sql/migrations").toURI()
val m = Migrations(resources, connection)
repeat(3) {
m.down(true).apply {
@@ -48,7 +47,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run dry migrations`() {
- val resources = File(this::class.java.getResource("/sql/real_migrations").toURI())
+ val resources = this::class.java.getResource("/sql/real_migrations").toURI()
Migrations(resources, connection).apply {
runDry().size `should be equal to` 2
}
@@ -59,7 +58,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run dry migrations launch twice`() {
- val resources = File(this::class.java.getResource("/sql/real_migrations").toURI())
+ val resources = this::class.java.getResource("/sql/real_migrations").toURI()
Migrations(resources, connection).apply {
runDry().size `should be equal to` 2
runDry().size `should be equal to` 2
@@ -68,7 +67,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run migrations`() {
- val resources = File(this::class.java.getResource("/sql/real_migrations").toURI())
+ val resources = this::class.java.getResource("/sql/real_migrations").toURI()
Migrations(resources, connection).apply {
run().apply {
size `should be equal to` 1
@@ -78,8 +77,8 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run migrations force down`() {
- val resources = File(this::class.java.getResource("/sql/real_migrations").toURI())
- val resourcesFunctions = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/real_migrations").toURI()
+ val resourcesFunctions = this::class.java.getResource("/sql/function/Test").toURI()
Migrations(listOf(resources, resourcesFunctions), connection).apply {
up().apply {
size `should be equal to` 6
@@ -94,7 +93,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run functions migrations`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
Migrations(resources, connection).apply {
run().size `should be equal to` 5
}
@@ -110,7 +109,7 @@ class MigrationTest() : TestAbstract() {
@Test
fun `run functions migrations and drop if exist`() {
- val resources = File(this::class.java.getResource("/sql/function/Test1").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test1").toURI()
Migrations(resources, connection).apply {
run().size `should be equal to` 1
}
@@ -123,7 +122,7 @@ class MigrationTest() : TestAbstract() {
Assertions.assertEquals(objTest!!.id, 3)
Assertions.assertEquals(objTest.name, "test")
- val resources2 = File(this::class.java.getResource("/sql/function/Test2").toURI())
+ val resources2 = this::class.java.getResource("/sql/function/Test2").toURI()
Migrations(resources2, connection).apply {
run().size `should be equal to` 1
}
diff --git a/src/test/kotlin/fr/postgresjson/RequesterTest.kt b/src/test/kotlin/fr/postgresjson/RequesterTest.kt
index 4ac074b..66021f4 100644
--- a/src/test/kotlin/fr/postgresjson/RequesterTest.kt
+++ b/src/test/kotlin/fr/postgresjson/RequesterTest.kt
@@ -6,17 +6,16 @@ import fr.postgresjson.entity.mutable.IdEntity
import org.junit.Assert
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
-import java.io.File
class RequesterTest : TestAbstract() {
class ObjTest(var name: String) : IdEntity(1)
@Test
fun `get query from file`() {
- val resources = File(this::class.java.getResource("/sql/query").toURI())
+ val resources = this::class.java.getResource("/sql/query").toURI()
val objTest: ObjTest? = Requester(connection)
.addQuery(resources)
- .getQuery("Test/selectOne")
+ .getQuery("selectOne")
.selectOne()
assertEquals(objTest!!.id, 2)
@@ -25,7 +24,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `get function from file`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
val objTest: ObjTest? = Requester(connection)
.addFunction(resources)
.getFunction("test_function")
@@ -37,10 +36,10 @@ class RequesterTest : TestAbstract() {
@Test
fun `call exec on query`() {
- val resources = File(this::class.java.getResource("/sql/query").toURI())
+ val resources = this::class.java.getResource("/sql/query").toURI()
val result = Requester(connection)
.addQuery(resources)
- .getQuery("Test/selectOne")
+ .getQuery("selectOne")
.exec()
assertEquals(1, result.rowsAffected)
@@ -48,7 +47,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `call exec on function`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
val result = Requester(connection)
.addFunction(resources)
.getFunction("test_function")
@@ -58,11 +57,11 @@ class RequesterTest : TestAbstract() {
}
@Test
- fun `call sendQuery on query`() {
- val resources = File(this::class.java.getResource("/sql/query").toURI())
+ fun `call sendQuery on query with name`() {
+ val resources = this::class.java.getResource("/sql/query").toURI()
val result = Requester(connection)
.addQuery(resources)
- .getQuery("Test/exec")
+ .getQuery("DeleteTest")
.sendQuery()
assertEquals(0, result)
@@ -70,7 +69,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `call sendQuery on function`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
val result = Requester(connection)
.addFunction(resources)
.getFunction("function_void")
@@ -81,7 +80,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `call selectOne on function`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
val obj: ObjTest = Requester(connection)
.addFunction(resources)
.getFunction("test_function")
@@ -92,7 +91,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `call selectOne on function with object`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
val obj2 = ObjTest("original")
val obj: ObjTest = Requester(connection)
.addFunction(resources)
@@ -105,10 +104,10 @@ class RequesterTest : TestAbstract() {
@Test
fun `call selectOne on query`() {
- val resources = File(this::class.java.getResource("/sql/query").toURI())
+ val resources = this::class.java.getResource("/sql/query").toURI()
val obj: ObjTest = Requester(connection)
.addQuery(resources)
- .getQuery("Test/selectOneWithParameters")
+ .getQuery("selectOneWithParameters")
.selectOne(mapOf("name" to "myName"))!!
assertEquals("myName", obj.name)
@@ -116,7 +115,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `call select (multiple) on function`() {
- val resources = File(this::class.java.getResource("/sql/function/Test").toURI())
+ val resources = this::class.java.getResource("/sql/function/Test").toURI()
+ val obj: List<ObjTest>? = Requester(connection)
.addFunction(resources)
.getFunction("test_function_multiple")
@@ -127,10 +126,10 @@ class RequesterTest : TestAbstract() {
@Test
fun `call select paginated on query`() {
- val resources = File(this::class.java.getResource("/sql/query").toURI())
+ val resources = this::class.java.getResource("/sql/query").toURI()
+ val result: Paginated<ObjTest> = Requester(connection)
.addQuery(resources)
- .getQuery("Test/selectPaginated")
+ .getQuery("selectPaginated")
.select(1, 2, mapOf("name" to "ff"))
Assert.assertNotNull(result)
Assert.assertEquals("ff", result.result[0].name)
@@ -141,7 +140,7 @@ class RequesterTest : TestAbstract() {
@Test
fun `call select paginated on function`() {
- val resources = File(this::class.java.getResource("/sql/function").toURI())
+ val resources = this::class.java.getResource("/sql/function").toURI()
+ val result: Paginated<ObjTest> = Requester(connection)
.addFunction(resources)
.getFunction("test_function_paginated")
@@ -155,10 +154,10 @@ class RequesterTest : TestAbstract() {
@Test
fun `call selectOne on query with extra parameter`() {
- val resources = File(this::class.java.getResource("/sql/query").toURI())
+ val resources = this::class.java.getResource("/sql/query").toURI()
val obj: ObjTest = Requester(connection)
.addQuery(resources)
- .getQuery("Test/selectOneWithParameters")
+ .getQuery("selectOneWithParameters")
.selectOne(mapOf("name" to "myName")) {
assertEquals("myName", it!!.name)
Assert.assertEquals("plop", rows[0].getString("other"))
diff --git a/src/test/kotlin/fr/postgresjson/TestAbstract.kt b/src/test/kotlin/fr/postgresjson/TestAbstract.kt
index 346f148..e785de3 100644
--- a/src/test/kotlin/fr/postgresjson/TestAbstract.kt
+++ b/src/test/kotlin/fr/postgresjson/TestAbstract.kt
@@ -9,7 +9,7 @@ import java.io.File
@TestInstance(PER_CLASS)
abstract class TestAbstract {
- protected val connection = Connection(database = "test_json", username = "test", password = "test")
+ protected val connection = Connection(database = "json_test", username = "test", password = "test", port = 5555)
@BeforeEach
fun beforeAll() {
diff --git a/src/test/resources/sql/query/Test/exec.sql b/src/test/resources/sql/query/Test/exec.sql
index 32cdc6e..eb9e9a9 100644
--- a/src/test/resources/sql/query/Test/exec.sql
+++ b/src/test/resources/sql/query/Test/exec.sql
@@ -1 +1,2 @@
+-- name: DeleteTest
delete FROM test where 2038538 = 2;
\ No newline at end of file