This could be a follow-up message for Aleksander Czajczynski, who wrote in the previous conversation "set datatypes support in Harbour":
I plan to make some work this year on supporting SQL queries on top of
RDD (any opened workarea - not only in-memory). But first something else
has priority!
While I was using ChatGPT, I was also looking at the same ideas for adding local SQL to Harbour.
I came across DuckDB as a good candidate to base our new functionality. MIT license.
DuckDB also has an extension for PostgreSQL
DuckDB is primarily written in C++ with a small C API layer for easy embedding.
The PostgreSQL extension for DuckDB allows you to query PostgreSQL databases from within DuckDB, treating remote PostgreSQL tables as if they were local tables. This is similar in concept to postgres_fdw in PostgreSQL, but in the opposite direction.
So we could run multiple postgres_scan() queries:
1. Each call can load a different PostgreSQL table into DuckDB local in-memory tables.
2. Use standard DuckDB SQL on those in-memory tables.
The DuckDB SQL is very similar to PostgreSQL.
It seems they even added timestamp with timezone support.
https://duckdb.org/
https://github.com/duckdb/duckdb
The following is the code generated by ChatGPT.
// contrib/sqlquery_duckdb/sqlquery.c
// Extended prototype of SQLQUERY() for Harbour using DuckDB with PostgreSQL extension support
#include <stdio.h>   /* snprintf() — previously pulled in only by accident */

#include "hbapi.h"
#include "hbapiitm.h"
#include "hbapicls.h"
#include "hbapierr.h"

#include "duckdb.h"
/* Process-wide DuckDB state, created lazily on the first SQLQUERY() call
 * and never torn down for the lifetime of the process.
 * NOTE(review): access is not synchronized — this assumes a single-threaded
 * Harbour VM; confirm before calling these functions from MT code. */
static duckdb_database db = NULL;
static duckdb_connection conn = NULL;
/* Install and load the DuckDB "postgres" extension on the global
 * connection so PostgreSQL tables can be scanned from DuckDB SQL.
 * Failures are deliberately tolerated: the extension may already be
 * installed, or the process may lack network access; any query that
 * actually needs the extension will then fail with its own message.
 * FIX: declared as (void) — the original empty parameter list is an
 * obsolescent K&R declaration in C. */
static void load_postgres_extensions( void )
{
   static const char * s_commands[] = { "INSTALL postgres;", "LOAD postgres;" };
   int i;

   for( i = 0; i < ( int ) ( sizeof( s_commands ) / sizeof( s_commands[ 0 ] ) ); ++i )
   {
      duckdb_result res;

      /* duckdb_query() fills `res` even on error (it carries the error
       * message), so the result must be destroyed on every path. */
      ( void ) duckdb_query( conn, s_commands[ i ], &res );
      duckdb_destroy_result( &res );
   }
}
HB_FUNC(SQLQUERY)
{
const char * sql = hb_parc(1);
if (!sql)
{
hb_errRT_BASE_SubstR( EG_ARG, 1001, NULL, "SQLQUERY", HB_ERR_ARGS_BASEPARAMS );
return;
}
// Initialize DuckDB only once
if (!db)
{
if (duckdb_open(NULL, &db) != DuckDBSuccess || duckdb_connect(db, &conn) != DuckDBSuccess)
{
hb_errRT_BASE_SubstR( EG_FAIL, 1002, NULL, "Could not initialize DuckDB", 0 );
return;
}
load_postgres_extensions();
}
duckdb_result result;
if (duckdb_query(conn, sql, &result) != DuckDBSuccess)
{
hb_retc("[ERROR] Query failed");
duckdb_destroy_result(&result);
return;
}
idx_t row_count = duckdb_row_count(&result);
idx_t column_count = duckdb_column_count(&result);
PHB_ITEM aResult = hb_itemArrayNew(row_count);
for (idx_t i = 0; i < row_count; ++i)
{
PHB_ITEM hRow = hb_hashNew(NULL);
for (idx_t j = 0; j < column_count; ++j)
{
const char * colname = duckdb_column_name(&result, j);
duckdb_type coltype = duckdb_column_type(&result, j);
PHB_ITEM val = NULL;
switch (coltype)
{
case DUCKDB_TYPE_BOOLEAN:
val = hb_itemPutL(NULL, duckdb_value_boolean(&result, j, i));
break;
case DUCKDB_TYPE_INTEGER:
val = hb_itemPutNI(NULL, duckdb_value_int32(&result, j, i));
break;
case DUCKDB_TYPE_BIGINT:
val = hb_itemPutNLL(NULL, duckdb_value_int64(&result, j, i));
break;
case DUCKDB_TYPE_DOUBLE:
val = hb_itemPutND(NULL, duckdb_value_double(&result, j, i));
break;
case DUCKDB_TYPE_VARCHAR:
case DUCKDB_TYPE_BLOB:
default:
val = hb_itemPutC(NULL, duckdb_value_varchar(&result, j, i));
break;
}
hb_hashAddCItem(hRow, colname, val);
hb_itemRelease(val);
}
hb_arraySetForward(aResult, i + 1, hRow);
hb_itemRelease(hRow);
}
duckdb_destroy_result(&result);
hb_itemReturnRelease(aResult);
}
HB_FUNC(SQLINSTALL)
{
const char * ext = hb_parc(1);
if (!ext || !conn)
{
hb_retc("[ERROR] Invalid arguments or connection not initialized");
return;
}
char cmd[256];
snprintf(cmd, sizeof(cmd), "INSTALL %s;", ext);
duckdb_result res;
duckdb_query(conn, cmd, &res);
duckdb_destroy_result(&res);
hb_retc("OK");
}
HB_FUNC(SQLLOAD)
{
const char * ext = hb_parc(1);
if (!ext || !conn)
{
hb_retc("[ERROR] Invalid arguments or connection not initialized");
return;
}
char cmd[256];
snprintf(cmd, sizeof(cmd), "LOAD %s;", ext);
duckdb_result res;
duckdb_query(conn, cmd, &res);
duckdb_destroy_result(&res);
hb_retc("OK");
}