From f0a76d9eafb7d594409710d4d9818d995efc5925 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 25 Oct 2024 21:15:15 +0200 Subject: [PATCH 001/118] Remove workarounds for PostgreSQL static build They seem to be no longer necessary. --- Rakefile.cross | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/Rakefile.cross b/Rakefile.cross index bb832e706..31edb597b 100644 --- a/Rakefile.cross +++ b/Rakefile.cross @@ -202,18 +202,6 @@ class CrossLibrary < OpenStruct # make libpq.dll task postgresql_lib => [ postgresql_global_makefile ] do |t| - # Work around missing dependency to libcommon in PostgreSQL-9.4.0 - chdir( static_postgresql_srcdir + "common" ) do - sh 'make', "-j#{NUM_CPUS}" - end - # Work around missing dependency to errorcodes.h in PostgreSQL-17.0 - chdir( static_postgresql_srcdir + "backend" + "utils" ) do - sh 'make', "-j#{NUM_CPUS}" - end - chdir( static_postgresql_srcdir + "port" ) do - sh 'make', "-j#{NUM_CPUS}" - end - chdir( postgresql_lib.dirname ) do sh 'make', "-j#{NUM_CPUS}", From 5128597e27a6040b49d817352569291be75c25ac Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 15 Nov 2024 22:11:50 +0100 Subject: [PATCH 002/118] Shrink the scope of one variable --- ext/pg_copy_coder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/pg_copy_coder.c b/ext/pg_copy_coder.c index 6d8b60aff..bee42f1c2 100644 --- a/ext/pg_copy_coder.c +++ b/ext/pg_copy_coder.c @@ -831,7 +831,6 @@ pg_bin_dec_copy_row(t_pg_coder *conv, const char *input_line, int len, int _tupl for( fieldno = 0; fieldno < nfields; fieldno++){ long input_len; - VALUE field_value; /* read field size */ if (line_end_ptr - cur_ptr < 4 ) goto length_error; @@ -843,6 +842,7 @@ pg_bin_dec_copy_row(t_pg_coder *conv, const char *input_line, int len, int _tupl /* NULL indicator */ rb_ary_push(array, Qnil); } else { + VALUE field_value; if (line_end_ptr - cur_ptr < input_len ) goto length_error; /* copy input data to field_str */ From 
25e584df2568058a161f736e221c02d07b5182e8 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 16 Nov 2024 19:35:52 +0100 Subject: [PATCH 003/118] Allow type_spec.rb to be run directly by rspec without database server which is much faster --- spec/pg/type_spec.rb | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index 95fa974de..48c003219 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -3,7 +3,10 @@ require 'pg' require 'time' - +unless defined?(ObjectSpace.memsize_of) + require "objspace" + DATA_OBJ_MEMSIZE = ObjectSpace.memsize_of(Object.new) +end describe "PG::Type derivations" do let!(:textenc_int) { PG::TextEncoder::Integer.new name: 'Integer', oid: 23 } From 9fecece6f7bab7f78a05e4220d6120a8e82acf03 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 16 Nov 2024 19:36:38 +0100 Subject: [PATCH 004/118] Fix tab indention --- spec/pg/type_spec.rb | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index 48c003219..4d64adef5 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -936,26 +936,26 @@ def expect_deprecated_coder_init expect( t.elements_type ).to be_nil end - it "should deny changes when frozen" do - t = PG::TextEncoder::Array.new.freeze - expect{ t.format = 1 }.to raise_error(FrozenError) - expect{ t.oid = 0 }.to raise_error(FrozenError) - expect{ t.name = "x" }.to raise_error(FrozenError) - expect{ t.needs_quotation = true }.to raise_error(FrozenError) - expect{ t.delimiter = "," }.to raise_error(FrozenError) - expect{ t.elements_type = nil }.to raise_error(FrozenError) - end + it "should deny changes when frozen" do + t = PG::TextEncoder::Array.new.freeze + expect{ t.format = 1 }.to raise_error(FrozenError) + expect{ t.oid = 0 }.to raise_error(FrozenError) + expect{ t.name = "x" }.to raise_error(FrozenError) + expect{ t.needs_quotation = true }.to 
raise_error(FrozenError) + expect{ t.delimiter = "," }.to raise_error(FrozenError) + expect{ t.elements_type = nil }.to raise_error(FrozenError) + end - it "should be shareable for Ractor", :ractor do - t = PG::TextEncoder::Array.new.freeze - Ractor.make_shareable(t) - end + it "should be shareable for Ractor", :ractor do + t = PG::TextEncoder::Array.new.freeze + Ractor.make_shareable(t) + end - it "should give account about memory usage" do - expect( ObjectSpace.memsize_of(textenc_int_array) ).to be > DATA_OBJ_MEMSIZE - expect( ObjectSpace.memsize_of(textdec_bytea_array) ).to be > DATA_OBJ_MEMSIZE + it "should give account about memory usage" do + expect( ObjectSpace.memsize_of(textenc_int_array) ).to be > DATA_OBJ_MEMSIZE + expect( ObjectSpace.memsize_of(textdec_bytea_array) ).to be > DATA_OBJ_MEMSIZE + end end - end it "should encode Strings as base64 in TextEncoder" do e = PG::TextEncoder::ToBase64.new From a5a11c6c908f5506a4cb58710a10aaca98390d6a Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 18 Nov 2024 08:58:01 +0100 Subject: [PATCH 005/118] Avoid "warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]" --- ext/pg_binary_encoder.c | 13 ++++++------- ext/pg_connection.c | 3 ++- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ext/pg_binary_encoder.c b/ext/pg_binary_encoder.c index df4567600..77c29e6c5 100644 --- a/ext/pg_binary_encoder.c +++ b/ext/pg_binary_encoder.c @@ -271,14 +271,13 @@ pg_bin_enc_date(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate, i case T_FALSE: write_nbo32(PG_INT32_MIN, out); return 4; + } { + VALUE year = rb_funcall(value, s_id_year, 0); + VALUE month = rb_funcall(value, s_id_month, 0); + VALUE day = rb_funcall(value, s_id_day, 0); + int jday = date2j(NUM2INT(year), NUM2INT(month), NUM2INT(day)) - POSTGRES_EPOCH_JDATE; + write_nbo32(jday, out); } - - VALUE year = rb_funcall(value, s_id_year, 0); - VALUE month = rb_funcall(value, s_id_month, 0); - VALUE day = 
rb_funcall(value, s_id_day, 0); - int jday = date2j(NUM2INT(year), NUM2INT(month), NUM2INT(day)) - POSTGRES_EPOCH_JDATE; - write_nbo32(jday, out); - }else{ /* first call -> determine the required length */ if(TYPE(value) == T_STRING){ diff --git a/ext/pg_connection.c b/ext/pg_connection.c index a36d09786..47b6a7314 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -2271,8 +2271,9 @@ pgconn_notifies(VALUE self) static int rb_io_descriptor(VALUE io) { + rb_io_t *fptr; Check_Type(io, T_FILE); - rb_io_t *fptr = RFILE(io)->fptr; + fptr = RFILE(io)->fptr; rb_io_check_closed(fptr); return fptr->fd; } From 6e887533129e245a74d4d1f290c111dd416ec407 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 15 Nov 2024 22:21:07 +0100 Subject: [PATCH 006/118] Add PG::BinaryDecoder::Array Fixes #603 --- ext/pg_binary_decoder.c | 143 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/ext/pg_binary_decoder.c b/ext/pg_binary_decoder.c index 432e42bf3..03a83d1f5 100644 --- a/ext/pg_binary_decoder.c +++ b/ext/pg_binary_decoder.c @@ -133,6 +133,147 @@ pg_bin_dec_to_base64(t_pg_coder *conv, const char *val, int len, int tuple, int return out_value; } +/* + * Maximum number of array subscripts (arbitrary limit) + */ +#define MAXDIM 6 + +/* + * Document-class: PG::BinaryDecoder::Array < PG::CompositeDecoder + * + * This is a decoder class for conversion of binary array types. + * + * It returns an Array with possibly an arbitrary number of sub-Arrays. + * All values are decoded according to the #elements_type accessor. + * Sub-arrays are decoded recursively. + * + * This decoder simply ignores any dimension decorations preceding the array values. + * It returns all array values as regular ruby Array with a zero based index, regardless of the index given in the dimension decoration. + * + * An array decoder which respects dimension decorations is waiting to be implemented. 
+ * + */ +static VALUE +pg_bin_dec_array(t_pg_coder *conv, const char *input_line, int len, int tuple, int field, int enc_idx) +{ + t_pg_composite_coder *this = (t_pg_composite_coder *)conv; + t_pg_coder_dec_func dec_func = pg_coder_dec_func(this->elem, this->comp.format); + + /* Current field */ + VALUE field_str; + + int i; + int ndim; + int nitems; + int flags; + int dim; + int dim_sizes[MAXDIM]; + VALUE arrays[MAXDIM]; + char *output_ptr; + const char *cur_ptr; + const char *line_end_ptr; + char *end_capa_ptr; + + /* Allocate a new string with embedded capacity and realloc later with + * exponential growing size when needed. */ + PG_RB_STR_NEW( field_str, output_ptr, end_capa_ptr ); + + /* set pointer variables for loop */ + cur_ptr = input_line; + line_end_ptr = input_line + len; + + /* read number of dimensions */ + if (line_end_ptr - cur_ptr < 4 ) goto length_error; + ndim = read_nbo32(cur_ptr); + if (ndim < 1 || ndim > MAXDIM) { + rb_raise( rb_eArgError, "unsupported number of array dimensions: %d", ndim ); + } + cur_ptr += 4; + + /* read flags */ + if (line_end_ptr - cur_ptr < 4 ) goto length_error; + flags = read_nbo32(cur_ptr); + if (flags != 0 && flags != 1) { + rb_raise( rb_eArgError, "unsupported binary array flags: %d", flags ); + } + cur_ptr += 4; + + /* ignore element OID */ + if (line_end_ptr - cur_ptr < 4 ) goto length_error; + cur_ptr += 4; + + nitems = 1; + for (i = 0; i < ndim; i++) { + /* read size of dimensions and ignore lower bound */ + if (line_end_ptr - cur_ptr < 8 ) goto length_error; + dim_sizes[i] = read_nbo32(cur_ptr); + nitems *= dim_sizes[i]; + /* TODO: check nitems to not overflow */ + cur_ptr += 8; + } + + dim = 0; + arrays[dim] = rb_ary_new2(dim_sizes[dim]); + for (i = 0; i < nitems; i++) { + int input_len; + + /* traverse dimensions down */ + while (dim < ndim - 1) { + dim++; + arrays[dim] = rb_ary_new2(dim_sizes[dim]); + rb_ary_push(arrays[dim - 1], arrays[dim]); + } + + /* read element length */ + if (line_end_ptr - cur_ptr < 
4 ) goto length_error; + input_len = read_nbo32(cur_ptr); + cur_ptr += 4; + + /* convert and put element into array */ + if (input_len < 0) { + if (input_len != -1) goto length_error; + /* NULL indicator */ + rb_ary_push(arrays[dim], Qnil); + } else { + VALUE field_value; + if (line_end_ptr - cur_ptr < input_len ) goto length_error; + + /* copy input data to field_str */ + PG_RB_STR_ENSURE_CAPA( field_str, input_len, output_ptr, end_capa_ptr ); + memcpy(output_ptr, cur_ptr, input_len); + cur_ptr += input_len; + output_ptr += input_len; + /* convert field_str through the type map */ + rb_str_set_len( field_str, output_ptr - RSTRING_PTR(field_str) ); + field_value = dec_func(this->elem, RSTRING_PTR(field_str), input_len, tuple, field, enc_idx); + + rb_ary_push(arrays[dim], field_value); + + if( field_value == field_str ){ + /* Our output string will be send to the user, so we can not reuse + * it for the next field. */ + PG_RB_STR_NEW( field_str, output_ptr, end_capa_ptr ); + } + } + + /* Reset the pointer to the start of the output/buffer string. 
*/ + output_ptr = RSTRING_PTR(field_str); + + /* traverse dimensions up */ + while (RARRAY_LEN(arrays[dim]) >= dim_sizes[dim] && dim > 0) { + dim--; + } + } + + if (cur_ptr < line_end_ptr) + rb_raise( rb_eArgError, "trailing data after binary array data at position: %ld", (long)(cur_ptr - input_line) + 1 ); + + return arrays[0]; + +length_error: + rb_raise( rb_eArgError, "premature end of binary array data at position: %ld", (long)(cur_ptr - input_line) + 1 ); +} + #define PG_INT64_MIN (-0x7FFFFFFFFFFFFFFFL - 1) #define PG_INT64_MAX 0x7FFFFFFFFFFFFFFFL @@ -305,6 +446,8 @@ init_pg_binary_decoder(void) /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "Timestamp", rb_cPG_SimpleDecoder ); */ pg_define_coder( "Timestamp", pg_bin_dec_timestamp, rb_cPG_SimpleDecoder, rb_mPG_BinaryDecoder ); + /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "Array", rb_cPG_CompositeDecoder ); */ + pg_define_coder( "Array", pg_bin_dec_array, rb_cPG_CompositeDecoder, rb_mPG_BinaryDecoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "ToBase64", rb_cPG_CompositeDecoder ); */ pg_define_coder( "ToBase64", pg_bin_dec_to_base64, rb_cPG_CompositeDecoder, rb_mPG_BinaryDecoder ); } From 73a9e34d49afa24982101328135aca5d1f947726 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 16 Nov 2024 10:36:47 +0100 Subject: [PATCH 007/118] BinaryDecoder::Array: Check limits on number of elements and allow zero dimensions --- ext/pg_binary_decoder.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/ext/pg_binary_decoder.c b/ext/pg_binary_decoder.c index 03a83d1f5..5006a3c72 100644 --- a/ext/pg_binary_decoder.c +++ b/ext/pg_binary_decoder.c @@ -162,6 +162,7 @@ pg_bin_dec_array(t_pg_coder *conv, const char *input_line, int len, int tuple, i /* Current field */ VALUE field_str; + int32_t nitems32; int i; int ndim; int nitems; @@ -185,7 +186,7 @@ pg_bin_dec_array(t_pg_coder *conv, const char *input_line, int len, int tuple, i /* read number of 
dimensions */ if (line_end_ptr - cur_ptr < 4 ) goto length_error; ndim = read_nbo32(cur_ptr); - if (ndim < 1 || ndim > MAXDIM) { + if (ndim < 0 || ndim > MAXDIM) { rb_raise( rb_eArgError, "unsupported number of array dimensions: %d", ndim ); } cur_ptr += 4; @@ -202,18 +203,24 @@ pg_bin_dec_array(t_pg_coder *conv, const char *input_line, int len, int tuple, i if (line_end_ptr - cur_ptr < 4 ) goto length_error; cur_ptr += 4; - nitems = 1; + nitems32 = ndim == 0 ? 0 : 1; for (i = 0; i < ndim; i++) { + int64_t prod; + /* read size of dimensions and ignore lower bound */ if (line_end_ptr - cur_ptr < 8 ) goto length_error; dim_sizes[i] = read_nbo32(cur_ptr); - nitems *= dim_sizes[i]; - /* TODO: check nitems to not overflow */ + prod = (int64_t) nitems32 * (int64_t) dim_sizes[i]; + nitems32 = (int32_t) prod; + if (dim_sizes[i] < 0 || (int64_t) nitems32 != prod) { + rb_raise( rb_eArgError, "unsupported array size: %" PRId64, prod ); + } cur_ptr += 8; } + nitems = (int)nitems32; dim = 0; - arrays[dim] = rb_ary_new2(dim_sizes[dim]); + arrays[dim] = rb_ary_new2(ndim == 0 ? 
0 : dim_sizes[dim]); for (i = 0; i < nitems; i++) { int input_len; From 560b13e0a371e4c037b2e50b7ab87c652ef94d5c Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 16 Nov 2024 19:33:53 +0100 Subject: [PATCH 008/118] BinaryDecoder::Array: add tests --- lib/pg/basic_type_registry.rb | 2 +- spec/pg/basic_type_map_for_results_spec.rb | 2 +- spec/pg/type_spec.rb | 90 ++++++++++++++++++++-- 3 files changed, 86 insertions(+), 8 deletions(-) diff --git a/lib/pg/basic_type_registry.rb b/lib/pg/basic_type_registry.rb index 4a6daaa70..9d583c71c 100644 --- a/lib/pg/basic_type_registry.rb +++ b/lib/pg/basic_type_registry.rb @@ -128,7 +128,7 @@ def initialize(connection, registry: nil) [0, :encoder, PG::TextEncoder::Array], [0, :decoder, PG::TextDecoder::Array], [1, :encoder, nil], - [1, :decoder, nil], + [1, :decoder, PG::BinaryDecoder::Array], ].inject([]) do |h, (format, direction, arraycoder)| coders = registry.coders_for(format, direction) || {} h[format] ||= {} diff --git a/spec/pg/basic_type_map_for_results_spec.rb b/spec/pg/basic_type_map_for_results_spec.rb index 04e46abc8..8df3d9e79 100644 --- a/spec/pg/basic_type_map_for_results_spec.rb +++ b/spec/pg/basic_type_map_for_results_spec.rb @@ -295,7 +295,7 @@ end end - [0].each do |format| + [0, 1].each do |format| it "should do format #{format} array type conversions" do res = @conn.exec_params( "SELECT CAST('{1,2,3}' AS INT2[]), CAST('{{1,2},{3,4}}' AS INT2[][]), CAST('{1,2,3}' AS INT4[]), diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index 4d64adef5..8019f3760 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -630,6 +630,8 @@ def expect_deprecated_coder_init let!(:textenc_string_array_with_delimiter) { PG::TextEncoder::Array.new elements_type: textenc_string, delimiter: ';' } let!(:textdec_string_array_with_delimiter) { PG::TextDecoder::Array.new elements_type: textdec_string, delimiter: ';' } let!(:textdec_bytea_array) { PG::TextDecoder::Array.new elements_type: textdec_bytea } + 
let!(:binarydec_array) { PG::BinaryDecoder::Array.new } + let!(:binarydec_int_array) { PG::BinaryDecoder::Array.new elements_type: PG::BinaryDecoder::Integer.new } # # Array parser specs are thankfully borrowed from here: @@ -638,9 +640,15 @@ def expect_deprecated_coder_init describe '#decode' do context 'one dimensional arrays' do context 'empty' do - it 'returns an empty array' do + it 'returns an empty array from text' do expect( textdec_string_array.decode(%[{}]) ).to eq( [] ) end + + it 'returns an empty array from binary' do + # binary '{}'::TEXT[] + b = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19" + expect( binarydec_array.decode(b) ).to eq( [] ) + end end context 'no strings' do @@ -714,7 +722,7 @@ def expect_deprecated_coder_init expect( textdec_string_array.decode(%({1,2,3}x)) ).to eq(['1','2','3']) expect( textdec_string_array.decode(%({{1,2},{2,3})) ).to eq([['1','2'],['2','3']]) expect( textdec_string_array.decode(%({{1,2},{2,3}}x)) ).to eq([['1','2'],['2','3']]) - expect( textdec_string_array.decode(%({[1,2},{2,3}}})) ).to eq(['[1','2']) + expect( textdec_string_array.decode(%({[1,2},{2,3}}})) ).to eq(["[1",'2']) end end @@ -766,11 +774,11 @@ def expect_deprecated_coder_init it 'returns an array of strings with a sub array and a quoted }' do expect( textdec_string_array.decode(%[{1,{"2,}3",NULL},4}]) ).to eq( ['1',['2,}3',nil],'4'] ) end - it 'returns an array of strings with a sub array and a quoted {' do - expect( textdec_string_array.decode(%[{1,{"2,{3"},4}]) ).to eq( ['1',['2,{3'],'4'] ) + it "returns an array of strings with a sub array and a quoted {" do + expect( textdec_string_array.decode(%[{1,{"2,{3"},4}]) ).to eq( ['1',["2,{3"],'4'] ) end - it 'returns an array of strings with a sub array and a quoted { and escaped quote' do - expect( textdec_string_array.decode(%[{1,{"2\\",{3"},4}]) ).to eq( ['1',['2",{3'],'4'] ) + it "returns an array of strings with a sub array and a quoted { and escaped quote" do + expect( 
textdec_string_array.decode(%[{1,{"2\\",{3"},4}]) ).to eq( ['1',["2\",{3"],'4'] ) end it 'returns an array of strings with a sub array with empty strings' do expect( textdec_string_array.decode(%[{1,{""},4,{""}}]) ).to eq( ['1',[''],'4',['']] ) @@ -793,6 +801,76 @@ def expect_deprecated_coder_init it 'returns an array of strings with sub arrays' do expect( textdec_string_array.decode(%[{1,{2,{3,4}},{NULL,6},7}]) ).to eq( ['1',['2',['3','4']],[nil,'6'],'7'] ) end + + # '[-1:1][-2:-2][-3:-2]={{{5,6}},{{6,7}},{{NULL,5}}}'::INT[] + let!(:bin_int_array_data) do + [ "00000003" + "00000001" + "00000017" + + "00000003" + "ffffffff" + + "00000001" + "fffffffe" + + "00000002" + "fffffffd" + + "00000004" + "00000005" + + "00000004" + "00000006" + + "00000004" + "00000006" + + "00000004" + "00000007" + + "ffffffff" + + "00000004" + "00000005" + ].pack("H*") + end + + # '[-1:1][-2:-2][-3:-2]={{{5,6}},{{6,7}},{{5,NULL}}}'::TEXT[] + let!(:bin_text_array_data) do + [ "00000003" + "00000001" + "00000019" + + "00000003" + "ffffffff" + + "00000001" + "fffffffe" + + "00000002" + "fffffffd" + + "00000001" + "35" + + "00000001" + "36" + + "00000002" + "3622" + + "00000001" + "37" + + "ffffffff" + + "00000001" + "35" + ].pack("H*") + end + + it 'can decode binary int[]' do + expect( binarydec_int_array.decode(bin_int_array_data) ).to eq( [[[5, 6]], [[6, 7]], [[nil, 5]]] ) + end + it 'can decode binary text[]' do + expect( binarydec_array.decode(bin_text_array_data) ).to eq( [[["5", "6"]], [["6\"", "7"]], [[nil, "5"]]] ) + end + it 'raises error when binary array is incomplete' do + (0 ... 
bin_int_array_data.bytesize).each do |i| + expect do + binarydec_int_array.decode(bin_int_array_data[0, i]) + end.to raise_error(ArgumentError, /premature/) + end + end + it 'raises error when binary array has additonal bytes' do + expect do + binarydec_int_array.decode(bin_int_array_data + "\0") + end.to raise_error(ArgumentError, /trailing/) + end + it 'raises error when binary array has invalid dimensions' do + d = ["00000007" + "00000001" + "00000019"].pack("H*") + expect do + binarydec_int_array.decode(d) + end.to raise_error(ArgumentError, /dimensions/) + end + it 'raises error when binary array has invalid flags' do + d = ["00000000" + "00000002" + "00000019"].pack("H*") + expect do + binarydec_int_array.decode(d) + end.to raise_error(ArgumentError, /flags/) + end + it 'raises error when binary array has invalid flags' do + d = ["00000002" + "00000000" + "00000019" + + "00010000" + "ffffffff" + + "00010000" + "fffffffe" + ].pack("H*") + expect do + binarydec_int_array.decode(d) + end.to raise_error(ArgumentError, /array size/) + end end it 'should decode array of types with decoder in ruby space' do From 3999b42689e618e861a74532ecb8e5d8b027bde9 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 17 Nov 2024 16:58:17 +0100 Subject: [PATCH 009/118] Add PG::BinaryEncoder::Array --- ext/pg_binary_encoder.c | 190 ++++++++++++++++++ lib/pg/basic_type_registry.rb | 2 +- .../pg/basic_type_map_based_on_result_spec.rb | 107 +++++----- spec/pg/type_spec.rb | 73 ++++++- 4 files changed, 309 insertions(+), 63 deletions(-) diff --git a/ext/pg_binary_encoder.c b/ext/pg_binary_encoder.c index 77c29e6c5..3d7922a56 100644 --- a/ext/pg_binary_encoder.c +++ b/ext/pg_binary_encoder.c @@ -304,6 +304,194 @@ pg_bin_enc_date(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate, i return 4; } +/* + * Maximum number of array subscripts (arbitrary limit) + */ +#define MAXDIM 6 + +/* + * Document-class: PG::BinaryEncoder::Array < PG::CompositeEncoder + * + * This is the 
encoder class for PostgreSQL array types in binary format. + * + * All values are encoded according to the #elements_type + * accessor. Sub-arrays are encoded recursively. + * + * This encoder expects an Array of values or sub-arrays as input. + * Other values are passed through as byte string without interpretation. + * + * The accessors needs_quotation and delimiter are ignored for binary encoding. + * + */ +static int +pg_bin_enc_array(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, int enc_idx) +{ + if (TYPE(value) == T_ARRAY) { + t_pg_composite_coder *this = (t_pg_composite_coder *)conv; + t_pg_coder_enc_func enc_func = pg_coder_enc_func(this->elem); + int dim_sizes[MAXDIM]; + int ndim = 1; + int nitems = 1; + VALUE el1 = value; + + if (RARRAY_LEN(value) == 0) { + nitems = 0; + ndim = 0; + dim_sizes[0] = 0; + } else { + /* Determine number of dimensions, sizes of dimensions and number of items */ + while(1) { + VALUE el2; + + dim_sizes[ndim-1] = RARRAY_LENINT(el1); + nitems *= dim_sizes[ndim-1]; + el2 = rb_ary_entry(el1, 0); + if (TYPE(el2) == T_ARRAY) { + ndim++; + if (ndim > MAXDIM) + rb_raise( rb_eArgError, "unsupported number of array dimensions: >%d", ndim ); + } else { + break; + } + el1 = el2; + } + } + + if(out){ + /* Second encoder pass -> write data to `out` */ + int dimpos[MAXDIM]; + VALUE arrays[MAXDIM]; + int dim = 0; + int item_idx = 0; + int i; + char *orig_out = out; + Oid elem_oid = this->elem ? 
this->elem->oid : 0; + + write_nbo32(ndim, out); out += 4; + write_nbo32(1 /* flags */, out); out += 4; + write_nbo32(elem_oid, out); out += 4; + for (i = 0; i < ndim; i++) { + dimpos[i] = 0; + write_nbo32(dim_sizes[i], out); out += 4; + write_nbo32(1 /* offset */, out); out += 4; + } + arrays[0] = value; + + while(1) { + /* traverse tree down */ + while (dim < ndim - 1) { + arrays[dim + 1] = rb_ary_entry(arrays[dim], dimpos[dim]); + dim++; + } + + for (i = 0; i < dim_sizes[dim]; i++) { + VALUE item = rb_ary_entry(arrays[dim], i); + + if (NIL_P(item)) { + write_nbo32(-1, out); out += 4; + } else { + /* Encoded string is returned in subint */ + int strlen; + VALUE is_one_pass = rb_ary_entry(*intermediate, item_idx++); + VALUE subint = rb_ary_entry(*intermediate, item_idx++); + + if (is_one_pass == Qtrue) { + strlen = RSTRING_LENINT(subint); + memcpy( out + 4, RSTRING_PTR(subint), strlen); + } else { + strlen = enc_func(this->elem, item, out + 4, &subint, enc_idx); + } + write_nbo32(strlen, out); + out += 4 /* length */ + strlen; + } + } + + /* traverse tree up and go to next sibling array */ + do { + if (dim > 0) { + dimpos[dim] = 0; + dim--; + dimpos[dim]++; + } else { + goto finished2; + } + } while (dimpos[dim] >= dim_sizes[dim]); + } + finished2: + return (int)(out - orig_out); + + } else { + /* First encoder pass -> determine required buffer space for `out` */ + + int dimpos[MAXDIM]; + VALUE arrays[MAXDIM]; + int dim = 0; + int item_idx = 0; + int i; + int size_sum = 0; + + *intermediate = rb_ary_new2(nitems); + + for (i = 0; i < MAXDIM; i++) { + dimpos[i] = 0; + } + arrays[0] = value; + + while(1) { + + /* traverse tree down */ + while (dim < ndim - 1) { + arrays[dim + 1] = rb_ary_entry(arrays[dim], dimpos[dim]); + dim++; + } + + for (i = 0; i < dim_sizes[dim]; i++) { + VALUE item = rb_ary_entry(arrays[dim], i); + + if (NIL_P(item)) { + size_sum += 4 /* length bytes = -1 */; + } else { + VALUE subint; + int strlen = enc_func(this->elem, item, NULL, &subint, 
enc_idx); + + /* Gather all intermediate values of elements into an array, which is returned as intermediate for the array encoder */ + if( strlen == -1 ){ + /* Encoded string is returned in subint */ + rb_ary_store(*intermediate, item_idx++, Qtrue); + rb_ary_store(*intermediate, item_idx++, subint); + + strlen = RSTRING_LENINT(subint); + } else { + /* Two passes necessary */ + rb_ary_store(*intermediate, item_idx++, Qfalse); + rb_ary_store(*intermediate, item_idx++, subint); + } + size_sum += 4 /* length bytes */ + strlen; + } + } + + /* traverse tree up and go to next sibling array */ + do { + if (dim > 0) { + dimpos[dim] = 0; + dim--; + dimpos[dim]++; + } else { + goto finished1; + } + } while (dimpos[dim] >= dim_sizes[dim]); + } + finished1:; + + return 4 /* ndim */ + 4 /* flags */ + 4 /* oid */ + + ndim * (4 /* dim size */ + 4 /* dim offset */) + + size_sum; + } + } else { + return pg_coder_enc_to_s( conv, value, out, intermediate, enc_idx ); + } +} + /* * Document-class: PG::BinaryEncoder::FromBase64 < PG::CompositeEncoder * @@ -381,6 +569,8 @@ init_pg_binary_encoder(void) /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Date", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Date", pg_bin_enc_date, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); + /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Array", rb_cPG_CompositeEncoder ); */ + pg_define_coder( "Array", pg_bin_enc_array, rb_cPG_CompositeEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "FromBase64", rb_cPG_CompositeEncoder ); */ pg_define_coder( "FromBase64", pg_bin_enc_from_base64, rb_cPG_CompositeEncoder, rb_mPG_BinaryEncoder ); } diff --git a/lib/pg/basic_type_registry.rb b/lib/pg/basic_type_registry.rb index 9d583c71c..02c2cca2b 100644 --- a/lib/pg/basic_type_registry.rb +++ b/lib/pg/basic_type_registry.rb @@ -127,7 +127,7 @@ def initialize(connection, registry: nil) @maps = [ [0, :encoder, PG::TextEncoder::Array], [0, :decoder, 
PG::TextDecoder::Array], - [1, :encoder, nil], + [1, :encoder, PG::BinaryEncoder::Array], [1, :decoder, PG::BinaryDecoder::Array], ].inject([]) do |h, (format, direction, arraycoder)| coders = registry.coders_for(format, direction) || {} diff --git a/spec/pg/basic_type_map_based_on_result_spec.rb b/spec/pg/basic_type_map_based_on_result_spec.rb index e8c26e2f6..da5e14203 100644 --- a/spec/pg/basic_type_map_based_on_result_spec.rb +++ b/spec/pg/basic_type_map_based_on_result_spec.rb @@ -29,64 +29,46 @@ Ractor.make_shareable(basic_type_mapping) end - it "should be usable with Ractor in text format", :ractor do - vals = Ractor.new(@conninfo) do |conninfo| - conn = PG.connect(conninfo) - basic_type_mapping = PG::BasicTypeMapBasedOnResult.new(conn) - conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) - - # Retrieve table OIDs per empty result set. - res = conn.exec( "SELECT * FROM copytable LIMIT 0" ) - tm = basic_type_mapping.build_column_map( res ) - row_encoder = PG::TextEncoder::CopyRow.new type_map: tm - - conn.copy_data( "COPY copytable FROM STDIN", row_encoder ) do |res| - conn.put_copy_data ['b', 234, [2,3]] - end - res = conn.exec( "SELECT * FROM copytable" ) - res.values - ensure - conn&.finish - end.take - - expect( vals ).to eq( [['b', '234', '{2,3}']] ) - end + [1, 0].each do |format| + it "should be usable with Ractor in format #{format}", :ractor do + vals = Ractor.new(@conninfo, format) do |conninfo, format| + conn = PG.connect(conninfo) + basic_type_mapping = PG::BasicTypeMapBasedOnResult.new(conn) + conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) - it "should be usable with Ractor in binary format", :ractor do - vals = Ractor.new(@conninfo) do |conninfo| - conn = PG.connect(conninfo) - basic_type_mapping = PG::BasicTypeMapBasedOnResult.new(conn) - conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT)" ) - - # Retrieve table OIDs per empty result set. 
- res = conn.exec( "SELECT * FROM copytable LIMIT 0", [], 1) - tm = basic_type_mapping.build_column_map( res ) - row_encoder = PG::BinaryEncoder::CopyRow.new type_map: tm + # Retrieve table OIDs per empty result set. + res = conn.exec( "SELECT * FROM copytable LIMIT 0", [], format ) + tm = basic_type_mapping.build_column_map( res ) + nsp = format==1 ? PG::BinaryEncoder : PG::TextEncoder + row_encoder = nsp::CopyRow.new type_map: tm - conn.copy_data( "COPY copytable FROM STDIN WITH (FORMAT binary)", row_encoder ) do |res| - conn.put_copy_data ['b', 234] - end - res = conn.exec( "SELECT * FROM copytable" ) - res.values - ensure - conn&.finish - end.take + conn.copy_data( "COPY copytable FROM STDIN WITH (FORMAT #{ format==1 ? "binary" : "text" })", row_encoder ) do |res| + conn.put_copy_data ['b', 234, [2,3]] + end + res = conn.exec( "SELECT * FROM copytable" ) + res.values + ensure + conn&.finish + end.take - expect( vals ).to eq( [['b', '234']] ) + expect( vals ).to eq( [['b', '234', '{2,3}']] ) + end end context "with usage of result oids for bind params encoder selection" do - it "can type cast query params" do - @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[], by BYTEA)" ) + [1, 0].each do |format| + it "can type cast query params to format #{format}" do + @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[], by BYTEA)" ) - # Retrieve table OIDs per empty result. - res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) - tm = basic_type_mapping.build_column_map( res ) + # Retrieve table OIDs per empty result. 
+ res = @conn.exec( "SELECT * FROM copytable LIMIT 0", [], format ) + tm = basic_type_mapping.build_column_map( res ) - @conn.exec_params( "INSERT INTO copytable VALUES ($1, $2, $3, $4)", ['a', 123, [5,4,3], "\0\xFF'"], 0, tm ) - @conn.exec_params( "INSERT INTO copytable VALUES ($1, $2, $3, $4)", ['b', 234, [2,3], "\"\n\r"], 0, tm ) - res = @conn.exec( "SELECT * FROM copytable" ) - expect( res.values ).to eq( [['a', '123', '{5,4,3}', '\x00ff27'], ['b', '234', '{2,3}', '\x220a0d']] ) + @conn.exec_params( "INSERT INTO copytable VALUES ($1, $2, $3, $4)", ['a', 123, [5,4,3], "\0\xFF'"], 0, tm ) + @conn.exec_params( "INSERT INTO copytable VALUES ($1, $2, $3, $4)", ['b', 234, [2,3], "\"\n\r"], 0, tm ) + res = @conn.exec( "SELECT * FROM copytable" ) + expect( res.values ).to eq( [['a', '123', '{5,4,3}', '\x00ff27'], ['b', '234', '{2,3}', '\x220a0d']] ) + end end it "can do JSON conversions", :postgresql_94 do @@ -124,20 +106,23 @@ end context "with usage of result oids for copy encoder selection" do - it "can type cast #copy_data text input with encoder" do - @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) + [1, 0].each do |format| + it "can type cast #copy_data text input with encoder to format #{format}" do + @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) - # Retrieve table OIDs per empty result set. - res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) - tm = basic_type_mapping.build_column_map( res ) - row_encoder = PG::TextEncoder::CopyRow.new type_map: tm + # Retrieve table OIDs per empty result set. + res = @conn.exec( "SELECT * FROM copytable LIMIT 0", [], format ) + tm = basic_type_mapping.build_column_map( res ) + nsp = format==1 ? 
PG::BinaryEncoder : PG::TextEncoder + row_encoder = nsp::CopyRow.new type_map: tm - @conn.copy_data( "COPY copytable FROM STDIN", row_encoder ) do |res| - @conn.put_copy_data ['a', 123, [5,4,3]] - @conn.put_copy_data ['b', 234, [2,3]] + @conn.copy_data( "COPY copytable FROM STDIN WITH (FORMAT #{ format==1 ? "binary" : "text" })", row_encoder ) do |res| + @conn.put_copy_data ['a', 123, [5,4,3]] + @conn.put_copy_data ['b', 234, [2,3]] + end + res = @conn.exec( "SELECT * FROM copytable" ) + expect( res.values ).to eq( [['a', '123', '{5,4,3}'], ['b', '234', '{2,3}']] ) end - res = @conn.exec( "SELECT * FROM copytable" ) - expect( res.values ).to eq( [['a', '123', '{5,4,3}'], ['b', '234', '{2,3}']] ) end [1, 0].each do |format| diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index 8019f3760..f741de3fd 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -632,6 +632,7 @@ def expect_deprecated_coder_init let!(:textdec_bytea_array) { PG::TextDecoder::Array.new elements_type: textdec_bytea } let!(:binarydec_array) { PG::BinaryDecoder::Array.new } let!(:binarydec_int_array) { PG::BinaryDecoder::Array.new elements_type: PG::BinaryDecoder::Integer.new } + let!(:binaryenc_array) { PG::BinaryEncoder::Array.new } # # Array parser specs are thankfully borrowed from here: @@ -817,7 +818,7 @@ def expect_deprecated_coder_init ].pack("H*") end - # '[-1:1][-2:-2][-3:-2]={{{5,6}},{{6,7}},{{5,NULL}}}'::TEXT[] + # '[-1:1][-2:-2][-3:-2]={{{5,6"}},{{6,7}},{{5,NULL}}}'::TEXT[] let!(:bin_text_array_data) do [ "00000003" + "00000001" + "00000019" + "00000003" + "ffffffff" + @@ -901,13 +902,51 @@ def expect_deprecated_coder_init it 'encodes an array of float8 with sub arrays' do expect( textenc_float_array.encode([1000.11,[-0.00000221,[3.31,-441]],[nil,6.61],-7.71]) ).to match(Regexp.new(%[^{1000.1*,{-2.2*e-*6,{3.3*,-441.0}},{NULL,6.6*},-7.7*}$].gsub(/([\.\+\{\}\,])/, "\\\\\\1").gsub(/\*/, "\\d*"))) end + + let!(:binaryenc_int4_array) { PG::BinaryEncoder::Array.new 
elements_type: PG::BinaryEncoder::Int4.new(oid: 0x17) } + + it 'encodes an array of int4 with sub arrays' do + exp = ["00000003" + "00000001" + "00000017" + + "00000003" + "00000001" + + "00000001" + "00000001" + + "00000002" + "00000001" + + "00000004" + "00000005" + + "00000004" + "00000006" + + "00000004" + "00000006" + + "00000004" + "00000007" + + "ffffffff" + + "00000004" + "00000005" + ].pack("H*") + + expect( binaryenc_int4_array.encode([[[5,6]],[[6,7]],[[nil,5]]]) ).to eq( exp ) + end + + let!(:binaryenc_text_array) { PG::BinaryEncoder::Array.new elements_type: PG::BinaryEncoder::String.new(oid: 0x19) } + + it 'encodes an array of text with sub arrays' do + exp =["00000003" + "00000001" + "00000019" + + "00000003" + "00000001" + + "00000001" + "00000001" + + "00000002" + "00000001" + + "00000001" + "35" + + "00000001" + "36" + + "00000002" + "3622" + + "00000001" + "37" + + "ffffffff" + + "00000001" + "35" + ].pack("H*") + + expect( binaryenc_text_array.encode([[[5,6]],[["6\"",7]],[[nil,5]]]) ).to eq( exp ) + end end + context 'two dimensional arrays' do it 'encodes an array of timestamps with sub arrays' do expect( textenc_timestamp_array.encode([Time.new(2014,12,31),[nil, Time.new(2016,01,02, 23, 23, 59.99)]]) ). 
to eq( %[{2014-12-31 00:00:00.000000000,{NULL,2016-01-02 23:23:59.990000000}}] ) end end + context 'one dimensional array' do it 'can encode empty arrays' do expect( textenc_int_array.encode([]) ).to eq( '{}' ) @@ -919,6 +958,38 @@ def expect_deprecated_coder_init it 'respects a different delimiter' do expect( textenc_string_array_with_delimiter.encode(['a','b,','c']) ).to eq( '{a;b,;c}' ) end + + it 'encodes an array' do + exp =["00000001" + "00000001" + "00000000" + + "00000002" + "00000001" + + "ffffffff" + + "00000002" + "3622" + ].pack("H*") + + expect( binaryenc_array.encode([nil, "6\""]) ).to eq( exp ) + end + end + + context 'other dimensional array' do + it 'encodes an empty array as zero dimensions' do + exp =["00000000" + "00000001" + "00000000"].pack("H*") + expect( binaryenc_array.encode([]) ).to eq( exp ) + end + it 'encodes a 6 dimensional array' do + exp =["00000006" + "00000001" + "00000000" + + "00000001" + "00000001" + + "00000001" + "00000001" + + "00000001" + "00000001" + + "00000001" + "00000001" + + "00000001" + "00000001" + + "00000001" + "00000001" + + "ffffffff" + ].pack("H*") + expect( binaryenc_array.encode([[[[[[nil]]]]]]) ).to eq( exp ) + end + it 'raises an error at too many dimensions' do + expect{ binaryenc_array.encode([[[[[[[nil]]]]]]]) }.to raise_error( ArgumentError, /number of array dimensions/) + end end context 'array of types with encoder in ruby space' do From 450e9c4f51723cb06146e7b8103c0605504c15b8 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 18 Nov 2024 08:13:21 +0100 Subject: [PATCH 010/118] PG::BinaryEncoder::Array: Add checks for Array uniformity --- ext/pg_binary_encoder.c | 9 ++++++++- spec/pg/type_spec.rb | 16 ++++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/ext/pg_binary_encoder.c b/ext/pg_binary_encoder.c index 3d7922a56..601bd333e 100644 --- a/ext/pg_binary_encoder.c +++ b/ext/pg_binary_encoder.c @@ -441,7 +441,14 @@ pg_bin_enc_array(t_pg_coder *conv, VALUE value, char 
*out, VALUE *intermediate, /* traverse tree down */ while (dim < ndim - 1) { - arrays[dim + 1] = rb_ary_entry(arrays[dim], dimpos[dim]); + VALUE array = rb_ary_entry(arrays[dim], dimpos[dim]); + if (TYPE(array) != T_ARRAY) { + rb_raise( rb_eArgError, "expected Array instead of %+"PRIsVALUE" in dimension %d", array, dim + 1 ); + } + if (dim_sizes[dim + 1] != RARRAY_LEN(array)) { + rb_raise( rb_eArgError, "varying number of array elements (%d and %d) in dimension %d", dim_sizes[dim + 1], RARRAY_LENINT(array), dim + 1 ); + } + arrays[dim + 1] = array; dim++; } diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index f741de3fd..a9f45b8b1 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -851,12 +851,18 @@ def expect_deprecated_coder_init binarydec_int_array.decode(bin_int_array_data + "\0") end.to raise_error(ArgumentError, /trailing/) end - it 'raises error when binary array has invalid dimensions' do + it 'raises error when binary array has too many dimensions' do d = ["00000007" + "00000001" + "00000019"].pack("H*") expect do binarydec_int_array.decode(d) end.to raise_error(ArgumentError, /dimensions/) end + it 'raises error when binary array has invalid dimensions' do + d = ["ffffffff" + "00000001" + "00000019"].pack("H*") + expect do + binarydec_int_array.decode(d) + end.to raise_error(ArgumentError, /dimensions/) + end it 'raises error when binary array has invalid flags' do d = ["00000000" + "00000002" + "00000019"].pack("H*") expect do @@ -987,9 +993,15 @@ def expect_deprecated_coder_init ].pack("H*") expect( binaryenc_array.encode([[[[[[nil]]]]]]) ).to eq( exp ) end - it 'raises an error at too many dimensions' do + it 'raises an error on too many dimensions' do expect{ binaryenc_array.encode([[[[[[[nil]]]]]]]) }.to raise_error( ArgumentError, /number of array dimensions/) end + it 'raises an error on changed dimensions' do + expect{ binaryenc_array.encode([[1], 2]) }.to raise_error( ArgumentError, /Array instead of 2 /) + end + it 'raises 
an error on varying array sizes' do + expect{ binaryenc_array.encode([[1], [2,3]]) }.to raise_error( ArgumentError, /varying number /) + end end context 'array of types with encoder in ruby space' do From 6c43eb5b9755918a4debb35baa2ab20982f6b5da Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 18 Nov 2024 08:14:11 +0100 Subject: [PATCH 011/118] PG::BinaryDecoder::Array: Add a spec for the maximum supported dimensions --- spec/pg/type_spec.rb | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index a9f45b8b1..1393f7793 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -839,6 +839,18 @@ def expect_deprecated_coder_init it 'can decode binary text[]' do expect( binarydec_array.decode(bin_text_array_data) ).to eq( [[["5", "6"]], [["6\"", "7"]], [[nil, "5"]]] ) end + it 'can decode binary text[] with 6 dimensions' do + d = ["00000006" + "00000001" + "00000019" + + "00000001" + "ffffffff" + + "00000001" + "fffffffe" + + "00000001" + "fffffffd" + + "00000001" + "ffffffff" + + "00000001" + "fffffffe" + + "00000001" + "fffffffd" + + "ffffffff" + ].pack("H*") + expect( binarydec_array.decode(d) ).to eq( [[[[[[nil]]]]]] ) + end it 'raises error when binary array is incomplete' do (0 ... 
bin_int_array_data.bytesize).each do |i| expect do From 766752f2c2ee5d51d93cb193d10a0f8038d593e8 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 18 Nov 2024 08:24:45 +0100 Subject: [PATCH 012/118] Extend Array specs to use mixed one and two-dimensional array --- spec/pg/basic_type_map_based_on_result_spec.rb | 4 ++-- spec/pg/basic_type_map_for_results_spec.rb | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spec/pg/basic_type_map_based_on_result_spec.rb b/spec/pg/basic_type_map_based_on_result_spec.rb index da5e14203..6e49d98e6 100644 --- a/spec/pg/basic_type_map_based_on_result_spec.rb +++ b/spec/pg/basic_type_map_based_on_result_spec.rb @@ -117,11 +117,11 @@ row_encoder = nsp::CopyRow.new type_map: tm @conn.copy_data( "COPY copytable FROM STDIN WITH (FORMAT #{ format==1 ? "binary" : "text" })", row_encoder ) do |res| - @conn.put_copy_data ['a', 123, [5,4,3]] + @conn.put_copy_data ['a', 123, [[5,4],[3,2]]] @conn.put_copy_data ['b', 234, [2,3]] end res = @conn.exec( "SELECT * FROM copytable" ) - expect( res.values ).to eq( [['a', '123', '{5,4,3}'], ['b', '234', '{2,3}']] ) + expect( res.values ).to eq( [['a', '123', '{{5,4},{3,2}}'], ['b', '234', '{2,3}']] ) end end diff --git a/spec/pg/basic_type_map_for_results_spec.rb b/spec/pg/basic_type_map_for_results_spec.rb index 8df3d9e79..2aabaa9bd 100644 --- a/spec/pg/basic_type_map_for_results_spec.rb +++ b/spec/pg/basic_type_map_for_results_spec.rb @@ -399,7 +399,7 @@ context "with usage of result oids for copy decoder selection" do it "can type cast #copy_data text output with decoder" do @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) - @conn.exec( "INSERT INTO copytable VALUES ('a', 123, '{5,4,3}'), ('b', 234, '{2,3}')" ) + @conn.exec( "INSERT INTO copytable VALUES ('a', 123, '{{5,4},{3,2}}'), ('b', 234, '{2,3}')" ) # Retrieve table OIDs per empty result. 
res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) @@ -412,7 +412,7 @@ rows << row end end - expect( rows ).to eq( [['a', 123, [5,4,3]], ['b', 234, [2,3]]] ) + expect( rows ).to eq( [['a', 123, [[5,4],[3,2]]], ['b', 234, [2,3]]] ) end it "can type cast #copy_data binary output with decoder" do From 9cee4f9a34db42a09b4fba20d725aa5c6a4dc06c Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 18 Nov 2024 10:41:14 +0100 Subject: [PATCH 013/118] Add BinaryEncoder::Array and BinaryDecoder::Array to coder matrix in the README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 838a2055b..f794f734d 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,7 @@ The following PostgreSQL column types are supported by ruby-pg (TE = Text Encode * Date: [TE](rdoc-ref:PG::TextEncoder::Date), [TD](rdoc-ref:PG::TextDecoder::Date), [BE](rdoc-ref:PG::BinaryEncoder::Date), [BD](rdoc-ref:PG::BinaryDecoder::Date) * JSON and JSONB: [TE](rdoc-ref:PG::TextEncoder::JSON), [TD](rdoc-ref:PG::TextDecoder::JSON) * Inet: [TE](rdoc-ref:PG::TextEncoder::Inet), [TD](rdoc-ref:PG::TextDecoder::Inet) -* Array: [TE](rdoc-ref:PG::TextEncoder::Array), [TD](rdoc-ref:PG::TextDecoder::Array) +* Array: [TE](rdoc-ref:PG::TextEncoder::Array), [TD](rdoc-ref:PG::TextDecoder::Array), [BE](rdoc-ref:PG::BinaryEncoder::Array), [BD](rdoc-ref:PG::BinaryDecoder::Array) * Composite Type (also called "Row" or "Record"): [TE](rdoc-ref:PG::TextEncoder::Record), [TD](rdoc-ref:PG::TextDecoder::Record) The following text and binary formats can also be encoded although they are not used as column type: From 33db8fd33ee9332abe3f716fa045c8641cedd6cc Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 21 Nov 2024 11:18:29 +0100 Subject: [PATCH 014/118] Merge text and binary specs for COPY decoder selection It was separated due to Array not available for binary format, but it is implemented now. 
--- spec/pg/basic_type_map_for_results_spec.rb | 58 +++++++++------------- 1 file changed, 23 insertions(+), 35 deletions(-) diff --git a/spec/pg/basic_type_map_for_results_spec.rb b/spec/pg/basic_type_map_for_results_spec.rb index 2aabaa9bd..327d943db 100644 --- a/spec/pg/basic_type_map_for_results_spec.rb +++ b/spec/pg/basic_type_map_for_results_spec.rb @@ -397,44 +397,32 @@ end context "with usage of result oids for copy decoder selection" do - it "can type cast #copy_data text output with decoder" do - @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) - @conn.exec( "INSERT INTO copytable VALUES ('a', 123, '{{5,4},{3,2}}'), ('b', 234, '{2,3}')" ) - - # Retrieve table OIDs per empty result. - res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) - tm = basic_type_mapping.build_column_map( res ) - row_decoder = PG::TextDecoder::CopyRow.new(type_map: tm).freeze - - rows = [] - @conn.copy_data( "COPY copytable TO STDOUT", row_decoder ) do |res| - while row=@conn.get_copy_data - rows << row + [0, 1].each do |format| + it "can type cast #copy_data output in format #{format} with decoder" do + @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[], b BYTEA, ts timestamp)" ) + @conn.exec( "INSERT INTO copytable VALUES ('a', 1234, '{{5,4},{3,2}}', '\\xff000a0d27', '2023-03-17 03:04:05.678912'), ('b', -444, '{2,3}', '\\x202078797a2020', '1990-12-17 15:14:45')" ) + + # Retrieve table OIDs per empty result. + res = @conn.exec( "SELECT * FROM copytable LIMIT 0", [], format ) + tm = basic_type_mapping.build_column_map( res ) + nsp = format==1 ? PG::BinaryDecoder : PG::TextDecoder + row_decoder = nsp::CopyRow.new(type_map: tm).freeze + + rows = [] + @conn.copy_data( "COPY copytable TO STDOUT WITH (FORMAT #{ format==1 ? 
"binary" : "text" })", row_decoder ) do |res| + while row=@conn.get_copy_data + rows << row + end end - end - expect( rows ).to eq( [['a', 123, [[5,4],[3,2]]], ['b', 234, [2,3]]] ) - end - it "can type cast #copy_data binary output with decoder" do - @conn.exec( "CREATE TEMP TABLE copytable (b BYTEA, i INT, ts timestamp)" ) - @conn.exec( "INSERT INTO copytable VALUES ('\\xff000a0d27', 1234, '2023-03-17 03:04:05.678912'), ('\\x202078797a2020', '-444', '1990-12-17 15:14:45')" ) - - # Retrieve table OIDs per empty result. - res = @conn.exec_params( "SELECT * FROM copytable LIMIT 0", [], 1 ) - tm = basic_type_mapping.build_column_map( res ) - row_decoder = PG::BinaryDecoder::CopyRow.new(type_map: tm).freeze - - rows = [] - @conn.copy_data( "COPY copytable TO STDOUT WITH (FORMAT binary)", row_decoder ) do |res| - while row=@conn.get_copy_data - rows << row - end + expect( rows.map{|l| l[0,4] } ).to eq( [['a', 1234, [[5,4],[3,2]], "\xff\x00\n\r'".b], ['b', -444, [2,3], " xyz "]] ) + # For compatibility reason the timestamp in text format is encoded as local time (TimestampWithoutTimeZone) instead of UTC + tmeth = format == 1 ? :utc : :local + expect( rows[0][4] ). + to be_within(0.000001).of( Time.send(tmeth, 2023, 3, 17, 3, 4, 5.678912) ) + expect( rows[1][4] ). + to be_within(0.000001).of( Time.send(tmeth, 1990, 12, 17, 15, 14, 45) ) end - expect( rows.map{|l| l[0,2] } ).to eq( [["\xff\x00\n\r'".b, 1234], [" xyz ", -444]] ) - expect( rows[0][2] ). - to be_within(0.000001).of( Time.utc(2023, 3, 17, 3, 4, 5.678912) ) - expect( rows[1][2] ). 
- to be_within(0.000001).of( Time.utc(1990, 12, 17, 15, 14, 45) ) end end end From 6bd31300911ebdd00ad3e5fd19cf59ded82a436a Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 22 Nov 2024 17:48:28 +0100 Subject: [PATCH 015/118] Drop support for PostgreSQL < 10 --- .github/workflows/source-gem.yml | 6 ++-- .travis.yml | 4 +-- README.ja.md | 4 +-- README.md | 4 +-- ext/extconf.rb | 7 ++-- ext/gvl_wrappers.c | 4 --- ext/pg.c | 6 +--- ext/pg_connection.c | 18 ---------- ext/pg_result.c | 5 --- lib/pg/connection.rb | 34 +++++++------------ pg.gemspec | 2 +- spec/helpers.rb | 4 --- .../pg/basic_type_map_based_on_result_spec.rb | 2 +- spec/pg/basic_type_map_for_results_spec.rb | 2 +- spec/pg/connection_async_spec.rb | 4 +-- spec/pg/connection_spec.rb | 20 +++++------ spec/pg/result_spec.rb | 4 +-- spec/pg/scheduler_spec.rb | 2 +- translation/po/all.pot | 4 +-- translation/po/ja.po | 8 ++--- 20 files changed, 49 insertions(+), 95 deletions(-) diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index 2f37bf59e..f17344136 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -48,8 +48,8 @@ jobs: PGVER: "17" - os: windows ruby: "2.5" - PGVERSION: 9.4.26-1-windows-x64 - PGVER: "9.4" + PGVERSION: 10.20-1-windows-x64 + PGVER: "10" - os: windows ruby: "mswin" PGVERSION: 17.0-1-windows-x64 @@ -63,7 +63,7 @@ jobs: - os: ubuntu os_ver: "20.04" ruby: "2.5" - PGVER: "9.3" + PGVER: "10" - os: ubuntu ruby: "truffleruby" PGVER: "13" diff --git a/.travis.yml b/.travis.yml index a6e4b9072..88b5c4ba3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,8 +19,8 @@ matrix: - rvm: "2.5" env: - - "PGVERSION=9.3" - # Use Ubuntu-16.04 since postgresql-9.3 depends on openssl-1.0.0, which isn't available in 20.04 + - "PGVERSION=10" + # Use Ubuntu-16.04 which provides openssl-1.0.0, which isn't available in 20.04 dist: xenial - rvm: ruby-head env: diff --git a/README.ja.md b/README.ja.md index 977d282e8..b1cdeae75 100644 --- a/README.ja.md 
+++ b/README.ja.md @@ -12,7 +12,7 @@ Pgは[PostgreSQL RDBMS](http://www.postgresql.org/)へのRubyのインターフェースです。[PostgreSQL -9.3以降](http://www.postgresql.org/support/versioning/)で動作します。 +10以降](http://www.postgresql.org/support/versioning/)で動作します。 簡単な使用例は次の通りです。 ```ruby @@ -42,7 +42,7 @@ Actionsのビルド状況](https://github.com/ged/ruby-pg/actions/workflows/sour ## 要件 * Ruby 2.5かそれより新しいバージョン -* PostgreSQL 9.3.xかそれ以降のバージョン(ヘッダー付属のもの、例えば-devの名前のパッケージ)。 +* PostgreSQL 10.xかそれ以降のバージョン(ヘッダー付属のもの、例えば-devの名前のパッケージ)。 それより前のバージョンのRubyやPostgreSQLでも通常は同様に動作しますが、定期的なテストはされていません。 diff --git a/README.md b/README.md index 838a2055b..6da56e3a7 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ ## Description Pg is the Ruby interface to the [PostgreSQL RDBMS](http://www.postgresql.org/). -It works with [PostgreSQL 9.3 and later](http://www.postgresql.org/support/versioning/). +It works with [PostgreSQL 10 and later](http://www.postgresql.org/support/versioning/). A small example usage: ```ruby @@ -40,7 +40,7 @@ A small example usage: ## Requirements * Ruby 2.5 or newer -* PostgreSQL 9.3.x or later (with headers, -dev packages, etc). +* PostgreSQL 10.x or later (with headers, -dev packages, etc). It usually works with earlier versions of Ruby/PostgreSQL as well, but those are not regularly tested. diff --git a/ext/extconf.rb b/ext/extconf.rb index 1df354ccb..fd798fd4e 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -144,13 +144,10 @@ module PG end end -have_func 'PQconninfo', 'libpq-fe.h' or +have_func 'PQencryptPasswordConn', 'libpq-fe.h' or # since PostgreSQL-10 abort "Your PostgreSQL is too old. Either install an older version " + - "of this gem or upgrade your database to at least PostgreSQL-9.3." + "of this gem or upgrade your database to at least PostgreSQL-10." 
# optional headers/functions -have_func 'PQsslAttribute', 'libpq-fe.h' # since PostgreSQL-9.5 -have_func 'PQresultVerboseErrorMessage', 'libpq-fe.h' # since PostgreSQL-9.6 -have_func 'PQencryptPasswordConn', 'libpq-fe.h' # since PostgreSQL-10 have_func 'PQresultMemorySize', 'libpq-fe.h' # since PostgreSQL-12 have_func 'PQenterPipelineMode', 'libpq-fe.h' do |src| # since PostgreSQL-14 # Ensure header files fit as well diff --git a/ext/gvl_wrappers.c b/ext/gvl_wrappers.c index f570913b5..3a1dae882 100644 --- a/ext/gvl_wrappers.c +++ b/ext/gvl_wrappers.c @@ -5,10 +5,6 @@ #include "pg.h" -#ifndef HAVE_PQENCRYPTPASSWORDCONN -char *PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user, const char *algorithm){return NULL;} -#endif - #ifdef ENABLE_GVL_UNLOCK FOR_EACH_BLOCKING_FUNCTION( DEFINE_GVL_WRAPPER_STRUCT ); FOR_EACH_BLOCKING_FUNCTION( DEFINE_GVL_SKELETON ); diff --git a/ext/pg.c b/ext/pg.c index c01e91570..f0b58a2f4 100644 --- a/ext/pg.c +++ b/ext/pg.c @@ -451,14 +451,12 @@ Init_pg_ext(void) rb_define_const(rb_mPGconstants, "PQERRORS_SQLSTATE", INT2FIX(PQERRORS_SQLSTATE)); #endif -#ifdef HAVE_PQRESULTVERBOSEERRORMESSAGE /* See Connection#set_error_context_visibility */ rb_define_const(rb_mPGconstants, "PQSHOW_CONTEXT_NEVER", INT2FIX(PQSHOW_CONTEXT_NEVER)); /* See Connection#set_error_context_visibility */ rb_define_const(rb_mPGconstants, "PQSHOW_CONTEXT_ERRORS", INT2FIX(PQSHOW_CONTEXT_ERRORS)); /* See Connection#set_error_context_visibility */ rb_define_const(rb_mPGconstants, "PQSHOW_CONTEXT_ALWAYS", INT2FIX(PQSHOW_CONTEXT_ALWAYS)); -#endif /****** PG::Connection CLASS CONSTANTS: Check Server Status ******/ @@ -530,16 +528,14 @@ Init_pg_ext(void) */ rb_define_const(rb_mPGconstants, "PG_DIAG_SEVERITY", INT2FIX(PG_DIAG_SEVERITY)); -#ifdef PG_DIAG_SEVERITY_NONLOCALIZED /* Result#result_error_field argument constant * * The severity; the field contents are ERROR, FATAL, or PANIC (in an error message), or WARNING, NOTICE, DEBUG, INFO, or LOG (in a 
notice message). * This is identical to the PG_DIAG_SEVERITY field except that the contents are never localized. * - * Available since PostgreSQL-9.6 */ rb_define_const(rb_mPGconstants, "PG_DIAG_SEVERITY_NONLOCALIZED", INT2FIX(PG_DIAG_SEVERITY_NONLOCALIZED)); -#endif + /* Result#result_error_field argument constant * * The SQLSTATE code for the error. diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 47b6a7314..1b3ece847 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -419,7 +419,6 @@ pgconn_s_conninfo_parse(VALUE self, VALUE conninfo) } -#ifdef HAVE_PQENCRYPTPASSWORDCONN static VALUE pgconn_sync_encrypt_password(int argc, VALUE *argv, VALUE self) { @@ -443,7 +442,6 @@ pgconn_sync_encrypt_password(int argc, VALUE *argv, VALUE self) return rval; } -#endif /* @@ -760,7 +758,6 @@ pgconn_options(VALUE self) * * Returns the connection options used by a live connection. * - * Available since PostgreSQL-9.3 */ static VALUE pgconn_conninfo( VALUE self ) @@ -2703,7 +2700,6 @@ pgconn_set_error_verbosity(VALUE self, VALUE in_verbosity) return INT2FIX(PQsetErrorVerbosity(conn, verbosity)); } -#ifdef HAVE_PQRESULTVERBOSEERRORMESSAGE /* * call-seq: * conn.set_error_context_visibility( context_visibility ) -> Integer @@ -2723,7 +2719,6 @@ pgconn_set_error_verbosity(VALUE self, VALUE in_verbosity) * * See also corresponding {libpq function}[https://www.postgresql.org/docs/current/libpq-control.html#LIBPQ-PQSETERRORCONTEXTVISIBILITY]. 
* - * Available since PostgreSQL-9.6 */ static VALUE pgconn_set_error_context_visibility(VALUE self, VALUE in_context_visibility) @@ -2732,7 +2727,6 @@ pgconn_set_error_context_visibility(VALUE self, VALUE in_context_visibility) PGContextVisibility context_visibility = NUM2INT(in_context_visibility); return INT2FIX(PQsetErrorContextVisibility(conn, context_visibility)); } -#endif /* * call-seq: @@ -3509,14 +3503,12 @@ pgconn_async_describe_prepared(VALUE self, VALUE stmt_name) } -#ifdef HAVE_PQSSLATTRIBUTE /* * call-seq: * conn.ssl_in_use? -> Boolean * * Returns +true+ if the connection uses SSL/TLS, +false+ if not. * - * Available since PostgreSQL-9.5 */ static VALUE pgconn_ssl_in_use(VALUE self) @@ -3550,7 +3542,6 @@ pgconn_ssl_in_use(VALUE self) * * See also #ssl_attribute_names and the {corresponding libpq function}[https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQSSLATTRIBUTE]. * - * Available since PostgreSQL-9.5 */ static VALUE pgconn_ssl_attribute(VALUE self, VALUE attribute_name) @@ -3569,7 +3560,6 @@ pgconn_ssl_attribute(VALUE self, VALUE attribute_name) * * See also #ssl_attribute * - * Available since PostgreSQL-9.5 */ static VALUE pgconn_ssl_attribute_names(VALUE self) @@ -3585,8 +3575,6 @@ pgconn_ssl_attribute_names(VALUE self) } -#endif - #ifdef HAVE_PQENTERPIPELINEMODE /* @@ -4592,9 +4580,7 @@ init_pg_connection(void) /****** PG::Connection INSTANCE METHODS: Control Functions ******/ rb_define_method(rb_cPGconn, "set_error_verbosity", pgconn_set_error_verbosity, 1); -#ifdef HAVE_PQRESULTVERBOSEERRORMESSAGE rb_define_method(rb_cPGconn, "set_error_context_visibility", pgconn_set_error_context_visibility, 1 ); -#endif rb_define_method(rb_cPGconn, "trace", pgconn_trace, 1); rb_define_method(rb_cPGconn, "untrace", pgconn_untrace, 0); @@ -4616,15 +4602,11 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "sync_get_last_result", pgconn_sync_get_last_result, 0); rb_define_method(rb_cPGconn, "get_last_result", 
pgconn_async_get_last_result, 0); rb_define_alias(rb_cPGconn, "async_get_last_result", "get_last_result"); -#ifdef HAVE_PQENCRYPTPASSWORDCONN rb_define_method(rb_cPGconn, "sync_encrypt_password", pgconn_sync_encrypt_password, -1); -#endif -#ifdef HAVE_PQSSLATTRIBUTE rb_define_method(rb_cPGconn, "ssl_in_use?", pgconn_ssl_in_use, 0); rb_define_method(rb_cPGconn, "ssl_attribute", pgconn_ssl_attribute, 1); rb_define_method(rb_cPGconn, "ssl_attribute_names", pgconn_ssl_attribute_names, 0); -#endif #ifdef HAVE_PQENTERPIPELINEMODE rb_define_method(rb_cPGconn, "pipeline_status", pgconn_pipeline_status, 0); diff --git a/ext/pg_result.c b/ext/pg_result.c index 43d838f22..909c293f2 100644 --- a/ext/pg_result.c +++ b/ext/pg_result.c @@ -613,14 +613,12 @@ pgresult_error_message(VALUE self) return ret; } -#ifdef HAVE_PQRESULTVERBOSEERRORMESSAGE /* * call-seq: * res.verbose_error_message( verbosity, show_context ) -> String * * Returns a reformatted version of the error message associated with a PGresult object. 
* - * Available since PostgreSQL-9.6 */ static VALUE pgresult_verbose_error_message(VALUE self, VALUE verbosity, VALUE show_context) @@ -639,7 +637,6 @@ pgresult_verbose_error_message(VALUE self, VALUE verbosity, VALUE show_context) return ret; } -#endif /* * call-seq: @@ -1707,10 +1704,8 @@ init_pg_result(void) rb_define_singleton_method(rb_cPGresult, "res_status", pgresult_s_res_status, 1); rb_define_method(rb_cPGresult, "error_message", pgresult_error_message, 0); rb_define_alias( rb_cPGresult, "result_error_message", "error_message"); -#ifdef HAVE_PQRESULTVERBOSEERRORMESSAGE rb_define_method(rb_cPGresult, "verbose_error_message", pgresult_verbose_error_message, 2); rb_define_alias( rb_cPGresult, "result_verbose_error_message", "verbose_error_message"); -#endif rb_define_method(rb_cPGresult, "error_field", pgresult_error_field, 1); rb_define_alias( rb_cPGresult, "result_error_field", "error_field" ); rb_define_method(rb_cPGresult, "clear", pg_result_clear, 0); diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index a9052ce78..c0fd1b2a1 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -356,21 +356,18 @@ def conninfo_hash end end - # Method 'ssl_attribute' was introduced in PostgreSQL 9.5. - if self.instance_methods.find{|m| m.to_sym == :ssl_attribute } - # call-seq: - # conn.ssl_attributes -> Hash - # - # Returns SSL-related information about the connection as key/value pairs - # - # The available attributes varies depending on the SSL library being used, - # and the type of connection. - # - # See also #ssl_attribute - def ssl_attributes - ssl_attribute_names.each.with_object({}) do |n,h| - h[n] = ssl_attribute(n) - end + # call-seq: + # conn.ssl_attributes -> Hash + # + # Returns SSL-related information about the connection as key/value pairs + # + # The available attributes varies depending on the SSL library being used, + # and the type of connection. 
+ # + # See also #ssl_attribute + def ssl_attributes + ssl_attribute_names.each.with_object({}) do |n,h| + h[n] = ssl_attribute(n) end end @@ -921,14 +918,9 @@ def ping(*args) :set_client_encoding => [:async_set_client_encoding, :sync_set_client_encoding], :client_encoding= => [:async_set_client_encoding, :sync_set_client_encoding], :cancel => [:async_cancel, :sync_cancel], + :encrypt_password => [:async_encrypt_password, :sync_encrypt_password], } private_constant :REDIRECT_METHODS - - if PG::Connection.instance_methods.include? :async_encrypt_password - REDIRECT_METHODS.merge!({ - :encrypt_password => [:async_encrypt_password, :sync_encrypt_password], - }) - end PG.make_shareable(REDIRECT_METHODS) def async_send_api=(enable) diff --git a/pg.gemspec b/pg.gemspec index 356bdfd09..6910e4d15 100644 --- a/pg.gemspec +++ b/pg.gemspec @@ -10,7 +10,7 @@ Gem::Specification.new do |spec| spec.email = ["ged@FaerieMUD.org", "lars@greiz-reinsdorf.de"] spec.summary = "Pg is the Ruby interface to the PostgreSQL RDBMS" - spec.description = "Pg is the Ruby interface to the PostgreSQL RDBMS. It works with PostgreSQL 9.3 and later." + spec.description = "Pg is the Ruby interface to the PostgreSQL RDBMS. It works with PostgreSQL 10 and later." 
spec.homepage = "https://github.com/ged/ruby-pg" spec.license = "BSD-2-Clause" spec.required_ruby_version = ">= 2.5" diff --git a/spec/helpers.rb b/spec/helpers.rb index 6c66562a4..c2fdf6e04 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -700,10 +700,6 @@ def set_etc_hosts(hostaddr, hostname) config.filter_run_excluding :windows end - config.filter_run_excluding( :postgresql_94 ) if PG.library_version < 90400 - config.filter_run_excluding( :postgresql_95 ) if PG.library_version < 90500 - config.filter_run_excluding( :postgresql_96 ) if PG.library_version < 90600 - config.filter_run_excluding( :postgresql_10 ) if PG.library_version < 100000 config.filter_run_excluding( :postgresql_12 ) if PG.library_version < 120000 config.filter_run_excluding( :postgresql_14 ) if PG.library_version < 140000 config.filter_run_excluding( :unix_socket ) if RUBY_PLATFORM=~/mingw|mswin/i diff --git a/spec/pg/basic_type_map_based_on_result_spec.rb b/spec/pg/basic_type_map_based_on_result_spec.rb index e8c26e2f6..66e985c1f 100644 --- a/spec/pg/basic_type_map_based_on_result_spec.rb +++ b/spec/pg/basic_type_map_based_on_result_spec.rb @@ -89,7 +89,7 @@ expect( res.values ).to eq( [['a', '123', '{5,4,3}', '\x00ff27'], ['b', '234', '{2,3}', '\x220a0d']] ) end - it "can do JSON conversions", :postgresql_94 do + it "can do JSON conversions" do ['JSON', 'JSONB'].each do |type| sql = "SELECT CAST('123' AS #{type}), CAST('12.3' AS #{type}), diff --git a/spec/pg/basic_type_map_for_results_spec.rb b/spec/pg/basic_type_map_for_results_spec.rb index 04e46abc8..991abeed1 100644 --- a/spec/pg/basic_type_map_for_results_spec.rb +++ b/spec/pg/basic_type_map_for_results_spec.rb @@ -275,7 +275,7 @@ end [0].each do |format| - it "should do format #{format} JSON conversions", :postgresql_94 do + it "should do format #{format} JSON conversions" do ['JSON', 'JSONB'].each do |type| res = @conn.exec_params( "SELECT CAST('123' AS #{type}), CAST('12.3' AS #{type}), diff --git 
a/spec/pg/connection_async_spec.rb b/spec/pg/connection_async_spec.rb index efbe01b50..da7174e67 100644 --- a/spec/pg/connection_async_spec.rb +++ b/spec/pg/connection_async_spec.rb @@ -8,7 +8,7 @@ describe PG::Connection do - it "tries to connect to localhost with IPv6 and IPv4", :ipv6, :postgresql_10 do + it "tries to connect to localhost with IPv6 and IPv4", :ipv6 do uri = "postgres://localhost:#{@port+1}/test" expect(described_class).to receive(:parse_connect_args).once.ordered.with(uri, any_args).and_call_original expect(described_class).to receive(:parse_connect_args).once.ordered.with(hash_including(hostaddr: "::1,127.0.0.1")).and_call_original @@ -135,7 +135,7 @@ def interrupt_thread(exc=nil) end end - it "doesn't duplicate hosts in conn.reset", :without_transaction, :ipv6, :postgresql_10 do + it "doesn't duplicate hosts in conn.reset", :without_transaction, :ipv6, :postgresql_12 do set_etc_hosts "::1", "rubypg_test2 rubypg_test_ipv6" set_etc_hosts "127.0.0.1", "rubypg_test2 rubypg_test_ipv4" conn = described_class.connect( "postgres://rubypg_test2/test" ) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 0f2148638..f0454aa8b 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -404,7 +404,7 @@ @dbms&.teardown end - it "honors target_session_attrs requirements", :postgresql_10 do + it "honors target_session_attrs requirements" do uri = "postgres://localhost:#{@port_ro},localhost:#{@port}/postgres?target_session_attrs=read-write" PG.connect(uri) do |conn| expect( conn.port ).to eq( @port ) @@ -417,7 +417,7 @@ end end - it "stops hosts iteration on authentication errors", :without_transaction, :ipv6, :postgresql_10 do + it "stops hosts iteration on authentication errors", :without_transaction, :ipv6 do @conn.exec("DROP USER IF EXISTS testusermd5") @conn.exec("CREATE USER testusermd5 PASSWORD 'secret'") @@ -559,7 +559,7 @@ res = @conn2.query("SELECT 4") end - it "can work with changing IO while connection setup", 
:postgresql_95 do + it "can work with changing IO while connection setup" do # The file_no of the socket IO can change while connecting. # This can happen when alternative hosts are tried, # while GSS authentication @@ -835,7 +835,7 @@ expect( new ).to eq( PG::PQERRORS_TERSE ) end - it "can set error context visibility", :postgresql_96 do + it "can set error context visibility" do old = @conn.set_error_context_visibility( PG::PQSHOW_CONTEXT_NEVER ) new = @conn.set_error_context_visibility( old ) expect( new ).to eq( PG::PQSHOW_CONTEXT_NEVER ) @@ -1525,21 +1525,21 @@ describe "connection information related to SSL" do - it "can retrieve connection's ssl state", :postgresql_95 do + it "can retrieve connection's ssl state" do expect( @conn.ssl_in_use? ).to be true end - it "can retrieve connection's ssl attribute_names", :postgresql_95 do + it "can retrieve connection's ssl attribute_names" do expect( @conn.ssl_attribute_names ).to be_a(Array) end - it "can retrieve a single ssl connection attribute", :postgresql_95 do + it "can retrieve a single ssl connection attribute" do expect( @conn.ssl_attribute('dbname') ).to eq( nil ) expect( @conn.ssl_attribute('protocol') ).to match( /^TLSv/ ) expect( @conn.ssl_attribute('key_bits') ).to match( /^\d+$/ ) end - it "can retrieve all connection's ssl attributes", :postgresql_95 do + it "can retrieve all connection's ssl attributes" do expect( @conn.ssl_attributes ).to be_a_kind_of( Hash ) end end @@ -1554,7 +1554,7 @@ end end - it "can connect concurrently in parallel threads", :postgresql_95 do + it "can connect concurrently in parallel threads" do res = 5.times.map do |idx| Thread.new do PG.connect(@conninfo) do |conn| @@ -1583,7 +1583,7 @@ end end - describe "password encryption method", :postgresql_10 do + describe "password encryption method" do it "can encrypt without algorithm" do expect( @conn.encrypt_password("postgres", "postgres") ).to match( /\S+/ ) expect( @conn.encrypt_password("postgres", "postgres", nil) ).to 
match( /\S+/ ) diff --git a/spec/pg/result_spec.rb b/spec/pg/result_spec.rb index 9a33c1b6f..836b7699e 100644 --- a/spec/pg/result_spec.rb +++ b/spec/pg/result_spec.rb @@ -350,7 +350,7 @@ ).to match( /^parserOpenTable$|^RangeVarGetRelid$/ ) end - it "encapsulates PG_DIAG_SEVERITY_NONLOCALIZED error in a PG::Error object", :postgresql_96 do + it "encapsulates PG_DIAG_SEVERITY_NONLOCALIZED error in a PG::Error object" do result = nil begin @conn.exec( "SELECT * FROM nonexistent_table" ) @@ -395,7 +395,7 @@ expect( res.result_error_message ).to match(/"xyz"/) end - it "provides a verbose error message", :postgresql_96 do + it "provides a verbose error message" do @conn.send_query("SELECT xyz") res = @conn.get_result; @conn.get_result # PQERRORS_TERSE should give a single line result diff --git a/spec/pg/scheduler_spec.rb b/spec/pg/scheduler_spec.rb index 95e182607..61935aabd 100644 --- a/spec/pg/scheduler_spec.rb +++ b/spec/pg/scheduler_spec.rb @@ -246,7 +246,7 @@ end end - it "can encrypt_password", :postgresql_10 do + it "can encrypt_password" do run_with_scheduler do |conn| res = conn.encrypt_password "passw", "myuser" expect( res ).to match( /\S+/ ) diff --git a/translation/po/all.pot b/translation/po/all.pot index 56b361e6d..37d687890 100644 --- a/translation/po/all.pot +++ b/translation/po/all.pot @@ -56,7 +56,7 @@ msgstr "" #, markdown-text msgid "" "Pg is the Ruby interface to the [PostgreSQL " -"RDBMS](http://www.postgresql.org/). It works with [PostgreSQL 9.3 and " +"RDBMS](http://www.postgresql.org/). It works with [PostgreSQL 10 and " "later](http://www.postgresql.org/support/versioning/)." msgstr "" @@ -118,7 +118,7 @@ msgstr "" #. type: Bullet: '* ' #: ../README.md:44 #, markdown-text -msgid "PostgreSQL 9.3.x or later (with headers, -dev packages, etc)." +msgid "PostgreSQL 10.x or later (with headers, -dev packages, etc)." msgstr "" #. 
type: Plain text diff --git a/translation/po/ja.po b/translation/po/ja.po index 3e76a2506..39761abd7 100644 --- a/translation/po/ja.po +++ b/translation/po/ja.po @@ -65,11 +65,11 @@ msgstr "説明" #: ../README.md:15 msgid "" "Pg is the Ruby interface to the [PostgreSQL RDBMS](http://www.postgresql." -"org/). It works with [PostgreSQL 9.3 and later](http://www.postgresql.org/" +"org/). It works with [PostgreSQL 10 and later](http://www.postgresql.org/" "support/versioning/)." msgstr "" "Pgは[PostgreSQL RDBMS](http://www.postgresql.org/)へのRubyのインターフェース" -"です。[PostgreSQL 9.3以降](http://www.postgresql.org/support/versioning/)で動" +"です。[PostgreSQL 10以降](http://www.postgresql.org/support/versioning/)で動" "作します。" #. type: Plain text @@ -149,9 +149,9 @@ msgstr "Ruby 2.5かそれより新しいバージョン" #. type: Bullet: '* ' #: ../README.md:44 -msgid "PostgreSQL 9.3.x or later (with headers, -dev packages, etc)." +msgid "PostgreSQL 10.x or later (with headers, -dev packages, etc)." msgstr "" -"PostgreSQL 9.3.xかそれ以降のバージョン(ヘッダー付属のもの、例えば-devの名前" +"PostgreSQL 10.xかそれ以降のバージョン(ヘッダー付属のもの、例えば-devの名前" "のパッケージ)。" #. 
type: Plain text From 13569b6e127b4e50887d10d9f3661b39c40a0120 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 22 Nov 2024 18:27:17 +0100 Subject: [PATCH 016/118] Drop support for Ruby < 2.7 --- .github/workflows/binary-gems.yml | 2 +- .github/workflows/source-gem.yml | 4 ++-- .travis.yml | 2 +- README.ja.md | 2 +- README.md | 2 +- Rakefile.cross | 2 +- ext/extconf.rb | 6 +----- ext/pg.h | 7 ------- ext/pg_coder.c | 4 ++-- ext/pg_connection.c | 2 +- ext/pg_copy_coder.c | 2 +- ext/pg_record_coder.c | 2 +- ext/pg_result.c | 6 +----- ext/pg_tuple.c | 2 +- ext/pg_type_map.c | 2 +- ext/pg_type_map_all_strings.c | 2 +- ext/pg_type_map_by_class.c | 2 +- ext/pg_type_map_by_column.c | 2 +- ext/pg_type_map_by_mri_type.c | 2 +- ext/pg_type_map_by_oid.c | 2 +- ext/pg_type_map_in_ruby.c | 2 +- pg.gemspec | 2 +- spec/pg/gc_compact_spec.rb | 2 +- translation/po/all.pot | 2 +- translation/po/ja.po | 4 ++-- 25 files changed, 27 insertions(+), 42 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 8067c131a..48bb03d7c 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -58,7 +58,7 @@ jobs: platform: "x86-mingw32" PGVERSION: 10.20-1-windows - os: windows-latest - ruby: "2.5" + ruby: "2.7" platform: "x64-mingw32" PGVERSION: 10.20-1-windows diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index f17344136..28c001457 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -47,7 +47,7 @@ jobs: PGVERSION: 17.0-1-windows-x64 PGVER: "17" - os: windows - ruby: "2.5" + ruby: "2.7" PGVERSION: 10.20-1-windows-x64 PGVER: "10" - os: windows @@ -62,7 +62,7 @@ jobs: PGVER: "12" - os: ubuntu os_ver: "20.04" - ruby: "2.5" + ruby: "2.7" PGVER: "10" - os: ubuntu ruby: "truffleruby" diff --git a/.travis.yml b/.travis.yml index 88b5c4ba3..0d8a36357 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ matrix: script: | docker run --rm -t 
--network=host ruby-pg - - rvm: "2.5" + - rvm: "2.7" env: - "PGVERSION=10" # Use Ubuntu-16.04 which provides openssl-1.0.0, which isn't available in 20.04 diff --git a/README.ja.md b/README.ja.md index b1cdeae75..95ab6a79a 100644 --- a/README.ja.md +++ b/README.ja.md @@ -41,7 +41,7 @@ Actionsのビルド状況](https://github.com/ged/ruby-pg/actions/workflows/sour ## 要件 -* Ruby 2.5かそれより新しいバージョン +* Ruby 2.7かそれより新しいバージョン * PostgreSQL 10.xかそれ以降のバージョン(ヘッダー付属のもの、例えば-devの名前のパッケージ)。 それより前のバージョンのRubyやPostgreSQLでも通常は同様に動作しますが、定期的なテストはされていません。 diff --git a/README.md b/README.md index 6da56e3a7..1d788e0ff 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ A small example usage: ## Requirements -* Ruby 2.5 or newer +* Ruby 2.7 or newer * PostgreSQL 10.x or later (with headers, -dev packages, etc). It usually works with earlier versions of Ruby/PostgreSQL as well, but those are diff --git a/Rakefile.cross b/Rakefile.cross index 31edb597b..82a923d5d 100644 --- a/Rakefile.cross +++ b/Rakefile.cross @@ -283,7 +283,7 @@ CrossLibraries.each do |xlib| (cp build/gem/gem-*.pem ~/.gem/ || true) && sudo apt-get update && sudo apt-get install -y bison flex && bundle install --local && - rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.3.0:3.2.0:3.1.0:3.0.0:2.7.0:2.6.0:2.5.0 + rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.3.0:3.2.0:3.1.0:3.0.0:2.7.0 EOT end desc "Build the windows binary gems" diff --git a/ext/extconf.rb b/ext/extconf.rb index fd798fd4e..059ac00b9 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -79,7 +79,7 @@ module PG "./postgresql_lib_path.rb" => "$(RUBYLIBDIR)/pg/" } -if RUBY_VERSION >= '2.3.0' && /solaris/ =~ RUBY_PLATFORM +if /solaris/ =~ RUBY_PLATFORM append_cppflags( '-D__EXTENSIONS__' ) end @@ -154,13 +154,9 @@ module PG src + " int con(){ return PGRES_PIPELINE_SYNC; }" end have_func 'timegm' -have_func 'rb_gc_adjust_memory_usage' # since ruby-2.4 
-have_func 'rb_gc_mark_movable' # since ruby-2.7 have_func 'rb_io_wait' # since ruby-3.0 have_func 'rb_io_descriptor' # since ruby-3.1 -# unistd.h confilicts with ruby/win32.h when cross compiling for win32 and ruby 1.9.1 -have_header 'unistd.h' have_header 'inttypes.h' have_header('ruby/fiber/scheduler.h') if RUBY_PLATFORM=~/mingw|mswin/ diff --git a/ext/pg.h b/ext/pg.h index 744a090aa..9b208942e 100644 --- a/ext/pg.h +++ b/ext/pg.h @@ -76,14 +76,7 @@ typedef long suseconds_t; #define PG_MAX_COLUMNS 4000 #endif -#ifdef HAVE_RB_GC_MARK_MOVABLE -#define pg_compact_callback(x) (x) #define pg_gc_location(x) x = rb_gc_location(x) -#else -#define rb_gc_mark_movable(x) rb_gc_mark(x) -#define pg_compact_callback(x) {(x)} -#define pg_gc_location(x) UNUSED(x) -#endif /* For compatibility with ruby < 3.0 */ #ifndef RUBY_TYPED_FROZEN_SHAREABLE diff --git a/ext/pg_coder.c b/ext/pg_coder.c index 77e70d988..2f7a8f1f2 100644 --- a/ext/pg_coder.c +++ b/ext/pg_coder.c @@ -95,7 +95,7 @@ const rb_data_type_t pg_coder_type = { (RUBY_DATA_FUNC) NULL, RUBY_TYPED_DEFAULT_FREE, pg_coder_memsize, - pg_compact_callback(pg_coder_compact), + pg_coder_compact, }, 0, 0, @@ -119,7 +119,7 @@ static const rb_data_type_t pg_composite_coder_type = { (RUBY_DATA_FUNC) NULL, RUBY_TYPED_DEFAULT_FREE, pg_composite_coder_memsize, - pg_compact_callback(pg_composite_coder_compact), + pg_composite_coder_compact, }, &pg_coder_type, 0, diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 1b3ece847..a404c09ed 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -230,7 +230,7 @@ static const rb_data_type_t pg_connection_type = { pgconn_gc_mark, pgconn_gc_free, pgconn_memsize, - pg_compact_callback(pgconn_gc_compact), + pgconn_gc_compact, }, 0, 0, diff --git a/ext/pg_copy_coder.c b/ext/pg_copy_coder.c index bee42f1c2..8389aef07 100644 --- a/ext/pg_copy_coder.c +++ b/ext/pg_copy_coder.c @@ -51,7 +51,7 @@ static const rb_data_type_t pg_copycoder_type = { pg_copycoder_mark, RUBY_TYPED_DEFAULT_FREE, 
pg_copycoder_memsize, - pg_compact_callback(pg_copycoder_compact), + pg_copycoder_compact, }, &pg_coder_type, 0, diff --git a/ext/pg_record_coder.c b/ext/pg_record_coder.c index 7319b809e..0d1fb16f4 100644 --- a/ext/pg_record_coder.c +++ b/ext/pg_record_coder.c @@ -43,7 +43,7 @@ static const rb_data_type_t pg_recordcoder_type = { pg_recordcoder_mark, RUBY_TYPED_DEFAULT_FREE, pg_recordcoder_memsize, - pg_compact_callback(pg_recordcoder_compact), + pg_recordcoder_compact, }, &pg_coder_type, 0, diff --git a/ext/pg_result.c b/ext/pg_result.c index 909c293f2..5f9d06725 100644 --- a/ext/pg_result.c +++ b/ext/pg_result.c @@ -147,9 +147,7 @@ pgresult_clear( void *_this ) t_pg_result *this = (t_pg_result *)_this; if( this->pgresult && !this->autoclear ){ PQclear(this->pgresult); -#ifdef HAVE_RB_GC_ADJUST_MEMORY_USAGE rb_gc_adjust_memory_usage(-this->result_size); -#endif } this->result_size = 0; this->nfields = -1; @@ -180,7 +178,7 @@ static const rb_data_type_t pgresult_type = { pgresult_gc_mark, pgresult_gc_free, pgresult_memsize, - pg_compact_callback(pgresult_gc_compact), + pgresult_gc_compact, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | PG_RUBY_TYPED_FROZEN_SHAREABLE, @@ -253,9 +251,7 @@ pg_new_result(PGresult *result, VALUE rb_pgconn) */ this->result_size = pgresult_approx_size(result); -#ifdef HAVE_RB_GC_ADJUST_MEMORY_USAGE rb_gc_adjust_memory_usage(this->result_size); -#endif return self; } diff --git a/ext/pg_tuple.c b/ext/pg_tuple.c index 8082fa0ec..6c06ad70c 100644 --- a/ext/pg_tuple.c +++ b/ext/pg_tuple.c @@ -125,7 +125,7 @@ static const rb_data_type_t pg_tuple_type = { pg_tuple_gc_mark, pg_tuple_gc_free, pg_tuple_memsize, - pg_compact_callback(pg_tuple_gc_compact), + pg_tuple_gc_compact, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | PG_RUBY_TYPED_FROZEN_SHAREABLE, diff --git a/ext/pg_type_map.c b/ext/pg_type_map.c index 73bf27b91..f6db5c0aa 100644 --- a/ext/pg_type_map.c +++ b/ext/pg_type_map.c @@ -33,7 +33,7 @@ const 
rb_data_type_t pg_typemap_type = { pg_typemap_mark, RUBY_TYPED_DEFAULT_FREE, pg_typemap_memsize, - pg_compact_callback(pg_typemap_compact), + pg_typemap_compact, }, 0, 0, diff --git a/ext/pg_type_map_all_strings.c b/ext/pg_type_map_all_strings.c index ad47cff8a..4b8fc2578 100644 --- a/ext/pg_type_map_all_strings.c +++ b/ext/pg_type_map_all_strings.c @@ -14,7 +14,7 @@ static const rb_data_type_t pg_tmas_type = { pg_typemap_mark, RUBY_TYPED_DEFAULT_FREE, pg_typemap_memsize, - pg_compact_callback(pg_typemap_compact), + pg_typemap_compact, }, &pg_typemap_type, 0, diff --git a/ext/pg_type_map_by_class.c b/ext/pg_type_map_by_class.c index 68e9adca2..5a0feab55 100644 --- a/ext/pg_type_map_by_class.c +++ b/ext/pg_type_map_by_class.c @@ -153,7 +153,7 @@ static const rb_data_type_t pg_tmbk_type = { pg_tmbk_mark, RUBY_TYPED_DEFAULT_FREE, pg_tmbk_memsize, - pg_compact_callback(pg_tmbk_compact), + pg_tmbk_compact, }, &pg_typemap_type, 0, diff --git a/ext/pg_type_map_by_column.c b/ext/pg_type_map_by_column.c index 35fbafba3..20e3a3893 100644 --- a/ext/pg_type_map_by_column.c +++ b/ext/pg_type_map_by_column.c @@ -228,7 +228,7 @@ static const rb_data_type_t pg_tmbc_type = { pg_tmbc_mark, pg_tmbc_free, pg_tmbc_memsize, - pg_compact_callback(pg_tmbc_compact), + pg_tmbc_compact, }, &pg_typemap_type, 0, diff --git a/ext/pg_type_map_by_mri_type.c b/ext/pg_type_map_by_mri_type.c index c7e50fdcd..84ac0a2df 100644 --- a/ext/pg_type_map_by_mri_type.c +++ b/ext/pg_type_map_by_mri_type.c @@ -130,7 +130,7 @@ static const rb_data_type_t pg_tmbmt_type = { pg_tmbmt_mark, RUBY_TYPED_DEFAULT_FREE, pg_tmbmt_memsize, - pg_compact_callback(pg_tmbmt_compact), + pg_tmbmt_compact, }, &pg_typemap_type, 0, diff --git a/ext/pg_type_map_by_oid.c b/ext/pg_type_map_by_oid.c index 1b8be4859..6439684ef 100644 --- a/ext/pg_type_map_by_oid.c +++ b/ext/pg_type_map_by_oid.c @@ -190,7 +190,7 @@ static const rb_data_type_t pg_tmbo_type = { pg_tmbo_mark, RUBY_TYPED_DEFAULT_FREE, pg_tmbo_memsize, - 
pg_compact_callback(pg_tmbo_compact), + pg_tmbo_compact, }, &pg_typemap_type, 0, diff --git a/ext/pg_type_map_in_ruby.c b/ext/pg_type_map_in_ruby.c index 0de715e99..836afdaf7 100644 --- a/ext/pg_type_map_in_ruby.c +++ b/ext/pg_type_map_in_ruby.c @@ -40,7 +40,7 @@ static const rb_data_type_t pg_tmir_type = { pg_typemap_mark, RUBY_TYPED_DEFAULT_FREE, pg_tmir_memsize, - pg_compact_callback(pg_tmir_compact), + pg_tmir_compact, }, &pg_typemap_type, 0, diff --git a/pg.gemspec b/pg.gemspec index 6910e4d15..310dc354a 100644 --- a/pg.gemspec +++ b/pg.gemspec @@ -13,7 +13,7 @@ Gem::Specification.new do |spec| spec.description = "Pg is the Ruby interface to the PostgreSQL RDBMS. It works with PostgreSQL 10 and later." spec.homepage = "https://github.com/ged/ruby-pg" spec.license = "BSD-2-Clause" - spec.required_ruby_version = ">= 2.5" + spec.required_ruby_version = ">= 2.7" spec.metadata["homepage_uri"] = spec.homepage spec.metadata["source_code_uri"] = "https://github.com/ged/ruby-pg" diff --git a/spec/pg/gc_compact_spec.rb b/spec/pg/gc_compact_spec.rb index d480a1dcf..198cb7495 100644 --- a/spec/pg/gc_compact_spec.rb +++ b/spec/pg/gc_compact_spec.rb @@ -15,7 +15,7 @@ # pg_tmbo_mark, # RUBY_TYPED_DEFAULT_FREE, # pg_tmbo_memsize, -# // pg_compact_callback(pg_tmbo_compact), +# // pg_tmbo_compact, # }, # # This should result in a segmentation fault aborting the whole process. diff --git a/translation/po/all.pot b/translation/po/all.pot index 37d687890..5a21ff434 100644 --- a/translation/po/all.pot +++ b/translation/po/all.pot @@ -112,7 +112,7 @@ msgstr "" #. type: Bullet: '* ' #: ../README.md:44 #, markdown-text -msgid "Ruby 2.5 or newer" +msgid "Ruby 2.7 or newer" msgstr "" #. type: Bullet: '* ' diff --git a/translation/po/ja.po b/translation/po/ja.po index 39761abd7..f67da8a75 100644 --- a/translation/po/ja.po +++ b/translation/po/ja.po @@ -144,8 +144,8 @@ msgstr "要件" #. 
type: Bullet: '* ' #: ../README.md:44 -msgid "Ruby 2.5 or newer" -msgstr "Ruby 2.5かそれより新しいバージョン" +msgid "Ruby 2.7 or newer" +msgstr "Ruby 2.7かそれより新しいバージョン" #. type: Bullet: '* ' #: ../README.md:44 From b96ad11012ce9896f4d3a92483d8f8a6f587a95c Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 23 Nov 2024 17:21:32 +0100 Subject: [PATCH 017/118] Refine documentation to TcpGateScheduler --- spec/helpers/tcp_gate_scheduler.rb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/spec/helpers/tcp_gate_scheduler.rb b/spec/helpers/tcp_gate_scheduler.rb index c16163d15..c1d17ca21 100644 --- a/spec/helpers/tcp_gate_scheduler.rb +++ b/spec/helpers/tcp_gate_scheduler.rb @@ -2,19 +2,19 @@ # This is a special scheduler for testing compatibility to Fiber.scheduler of functions using a TCP connection. # -# It works as a gate between the client and the server. +# It works as a gatekeeper between the client and the server. # Data is transferred only, when the scheduler receives wait_io requests. -# The TCP communication in a C extension can be verified in a (mostly) timing insensitive way. -# If a call does IO but doesn't call the scheduler, the test will block and can be caught by an external timeout. +# The TCP communication in a C extension can be verified in a timing insensitive way. +# If a call waits for IO but doesn't call the scheduler, the test will block and can be caught by an external timeout. 
# -# PG.connect -# port:5444 TcpGateScheduler DB +# PG.connect intern extern +# port:5444 side TcpGateScheduler side DB # ------------- ---------------------------------------- -------- # | scheduler | | TCPServer TCPSocket | | | # | specs |----->| port 5444 port 5432|----->|Server| # ------------- ^ | | | port | -# '------- wait_readable: <-send data-- | | 5432 | -# observe fd| wait_writable: --send data-> | -------- +# '------- wait_readable: <--read data-- | | 5432 | +# observe fd| wait_writable: --write data-> | -------- # ---------------------------------------- module Helpers From ba6da795b7c55a38a25fd7f71f906078b007326f Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 23 Nov 2024 17:22:23 +0100 Subject: [PATCH 018/118] Fix writing lots of data through the TcpGateScheduler 3 tests failed after upgrade to Linux-6.10 due to starvation of data transfer in write direction. This was because we stopped the transfer when less than the requested size could be read. Stopping only when no data at all was read fixes this issue. On Windows reading after partial data read can lead to Errno::ECONNABORTED, so this is considered a EOF condition. It also turned out, that the same issue led to starvation of SSL encrypted traffix. 
So this patch re-enables SSL for scheduler tests which was disabled in commit f270b714c66690b18a0e28bf0af667212cb00bee Fixes #601 --- spec/helpers.rb | 4 ++-- spec/helpers/tcp_gate_scheduler.rb | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spec/helpers.rb b/spec/helpers.rb index c2fdf6e04..6a4e23d10 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -594,7 +594,7 @@ def scheduler_setup # Run examples with gated scheduler sched = Helpers::TcpGateScheduler.new(external_host: 'localhost', external_port: ENV['PGPORT'].to_i, debug: ENV['PG_DEBUG']=='1') Fiber.set_scheduler(sched) - @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{sched.internal_port} sslmode=disable") + @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{sched.internal_port}") # Run examples with default scheduler #Fiber.set_scheduler(Helpers::Scheduler.new) @@ -640,7 +640,7 @@ def run_with_scheduler(timeout=10) def gate_setup # Run examples with gate gate = Helpers::TcpGateSwitcher.new(external_host: 'localhost', external_port: ENV['PGPORT'].to_i, debug: ENV['PG_DEBUG']=='1') - @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{gate.internal_port} sslmode=disable") + @conninfo_gate = @conninfo.gsub(/(^| )port=\d+/, " port=#{gate.internal_port}") # Run examples without gate #@conninfo_gate = @conninfo diff --git a/spec/helpers/tcp_gate_scheduler.rb b/spec/helpers/tcp_gate_scheduler.rb index c1d17ca21..31bcf9f52 100644 --- a/spec/helpers/tcp_gate_scheduler.rb +++ b/spec/helpers/tcp_gate_scheduler.rb @@ -148,12 +148,12 @@ def write( transfer_until: ) rescue IO::WaitReadable, Errno::EINTR @internal_io.wait_readable retry - rescue EOFError, Errno::ECONNRESET + rescue EOFError, Errno::ECONNRESET, Errno::ECONNABORTED puts "write_eof from #{write_fds}" @external_io.close_write break end - break if @transfer_until != :eof && (!read_str || read_str.bytesize < len) + break if @transfer_until != :eof && (!read_str || read_str.empty?) 
end @pending_write = false end From ffe5054d8b6b85227092ed35802dde26684f54b6 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 23 Nov 2024 19:18:18 +0100 Subject: [PATCH 019/118] Move short cut for $scheduler_timeout to the same file where it is defined --- spec/helpers.rb | 9 +++++++++ spec/pg/scheduler_spec.rb | 8 -------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/spec/helpers.rb b/spec/helpers.rb index 6a4e23d10..b4cdcadc0 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -727,3 +727,12 @@ def set_etc_hosts(hostaddr, hostname) $pg_server.teardown end end + + +# Do not wait for threads doing blocking calls at the process shutdown. +# Instead exit immediately after printing the rspec report, if we know there are pending IO calls, which do not react on ruby interrupts. +END{ + if $scheduler_timeout + exit!(1) + end +} diff --git a/spec/pg/scheduler_spec.rb b/spec/pg/scheduler_spec.rb index 61935aabd..fac371218 100644 --- a/spec/pg/scheduler_spec.rb +++ b/spec/pg/scheduler_spec.rb @@ -264,11 +264,3 @@ end end end - -# Do not wait for threads doing blocking calls at the process shutdown. -# Instead exit immediately after printing the rspec report, if we know there are pending IO calls, which do not react on ruby interrupts. -END{ - if $scheduler_timeout - exit!(1) - end -} From 1393d19285e0b8a25815870210857d333aaa0fae Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 23 Nov 2024 19:47:27 +0100 Subject: [PATCH 020/118] Initialize "intermediate" VALUE for encoding It is no good practice to put uninitialized VALUEs into ruby array and it can lead to C/Java exceptions in Truffleruby. So better initialize them to nil. Also remove the GC_GUARD, since it's not necessary. 
--- ext/pg_coder.c | 4 +--- ext/pg_connection.c | 7 ++----- ext/pg_text_encoder.c | 4 ++-- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/ext/pg_coder.c b/ext/pg_coder.c index 2f7a8f1f2..ef98dee16 100644 --- a/ext/pg_coder.c +++ b/ext/pg_coder.c @@ -175,7 +175,7 @@ static VALUE pg_coder_encode(int argc, VALUE *argv, VALUE self) { VALUE res; - VALUE intermediate; + VALUE intermediate = Qnil; VALUE value; int len, len2; int enc_idx; @@ -213,8 +213,6 @@ pg_coder_encode(int argc, VALUE *argv, VALUE self) } rb_str_set_len( res, len2 ); - RB_GC_GUARD(intermediate); - return res; } diff --git a/ext/pg_connection.c b/ext/pg_connection.c index a404c09ed..a3e90f6e9 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -1297,7 +1297,7 @@ alloc_query_params(struct query_params_data *paramsData) paramsData->lengths[i] = 0; } else { t_pg_coder_enc_func enc_func = pg_coder_enc_func( conv ); - VALUE intermediate; + VALUE intermediate = Qnil; /* 1st pass for retiving the required memory space */ int len = enc_func(conv, param_value, NULL, &intermediate, paramsData->enc_idx); @@ -1337,8 +1337,6 @@ alloc_query_params(struct query_params_data *paramsData) required_pool_size += len; } } - - RB_GC_GUARD(intermediate); } } } @@ -2566,7 +2564,7 @@ pgconn_sync_put_copy_data(int argc, VALUE *argv, VALUE self) VALUE value; VALUE buffer = Qnil; VALUE encoder; - VALUE intermediate; + VALUE intermediate = Qnil; t_pg_coder *p_coder = NULL; rb_scan_args( argc, argv, "11", &value, &encoder ); @@ -2605,7 +2603,6 @@ pgconn_sync_put_copy_data(int argc, VALUE *argv, VALUE self) if(ret == -1) pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(this->pgconn)); - RB_GC_GUARD(intermediate); RB_GC_GUARD(buffer); return (ret) ? 
Qtrue : Qfalse; diff --git a/ext/pg_text_encoder.c b/ext/pg_text_encoder.c index a356b7581..0e0e7fbf1 100644 --- a/ext/pg_text_encoder.c +++ b/ext/pg_text_encoder.c @@ -231,7 +231,7 @@ pg_text_enc_integer(t_pg_coder *this, VALUE value, char *out, VALUE *intermediat * */ static int -pg_text_enc_float(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, int enc_idx) +pg_text_enc_float(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate1, int enc_idx) { if(out){ double dvalue = NUM2DBL(value); @@ -239,7 +239,6 @@ pg_text_enc_float(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, int neg = 0; int exp2i, exp10i, i; unsigned long long ll, remainder, oldval; - VALUE intermediate; /* Cast to the same strings as value.to_s . */ if( isinf(dvalue) ){ @@ -283,6 +282,7 @@ pg_text_enc_float(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, if( exp10i <= -5 || exp10i >= 15 ) { /* Write the float in exponent format (1.23e45) */ + VALUE intermediate; /* write fraction digits from right to left */ for( i = MAX_DOUBLE_DIGITS; i > 1; i--){ From 042615f5e99e86221669b41a035e53b0b90d7e7d Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 23 Nov 2024 21:37:49 +0100 Subject: [PATCH 021/118] Fix issues on Truffleruby in CI --- spec/helpers/tcp_gate_switcher.rb | 6 ++++-- spec/pg/connection_spec.rb | 3 --- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/spec/helpers/tcp_gate_switcher.rb b/spec/helpers/tcp_gate_switcher.rb index fc72b6911..2fa852e53 100644 --- a/spec/helpers/tcp_gate_switcher.rb +++ b/spec/helpers/tcp_gate_switcher.rb @@ -8,7 +8,7 @@ # If a call does IO but doesn't handle non-blocking state, the test will block and can be caught by an external timeout. # # -# PG.connect TcpGateSwitcher +# PG.connect intern TcpGateSwitcher extern # port:5444 .--------------------------------------. # .--start/stop---------------> T | DB # .-----|-----. | | / | .------. 
@@ -95,10 +95,12 @@ def write begin read_str = @internal_io.read_nonblock(65536) print_data("write-transfer #{write_fds}", read_str) + # Workaround for sporadic "SSL error: ssl/tls alert bad record mac" + sleep 0.001 if RUBY_ENGINE=="truffleruby" @external_io.write(read_str) rescue IO::WaitReadable, Errno::EINTR @internal_io.wait_readable - rescue EOFError, Errno::ECONNRESET + rescue EOFError, Errno::ECONNRESET, Errno::EPIPE puts "write_eof from #{write_fds}" @external_io.close_write break diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index f0454aa8b..e8b45e56e 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -701,9 +701,6 @@ unless RUBY_PLATFORM =~ /i386-mingw|x86_64-darwin|x86_64-linux/ skip "this spec depends on out-of-memory condition in put_copy_data, which is not reliable on all platforms" end - if RUBY_ENGINE == "truffleruby" - skip "TcpGateSwitcher responds with Errno::EPIPE: Broken pipe on Truffleruby" - end run_with_gate(200) do |conn, gate| conn.setnonblocking(true) From 305539dbd414d4d6eda3fe268cb69031cee3a0cd Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 23 Nov 2024 22:24:13 +0100 Subject: [PATCH 022/118] Add Connection#set_chunked_rows_mode which is new in PostgreSQL-17 as PQsetChunkedRowsMode() --- ext/extconf.rb | 1 + ext/pg.c | 4 + ext/pg_connection.c | 54 +++++- ext/pg_result.c | 9 +- spec/helpers.rb | 1 + spec/pg/connection_spec.rb | 61 ++++++ spec/pg/result_spec.rb | 368 +++++++++++++++++++------------------ 7 files changed, 306 insertions(+), 192 deletions(-) diff --git a/ext/extconf.rb b/ext/extconf.rb index 059ac00b9..73f579bc0 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -153,6 +153,7 @@ module PG # Ensure header files fit as well src + " int con(){ return PGRES_PIPELINE_SYNC; }" end +have_func 'PQsetChunkedRowsMode', 'libpq-fe.h' # since PostgreSQL-17 have_func 'timegm' have_func 'rb_io_wait' # since ruby-3.0 have_func 'rb_io_descriptor' # since ruby-3.1 diff --git a/ext/pg.c 
b/ext/pg.c index f0b58a2f4..d60d1c666 100644 --- a/ext/pg.c +++ b/ext/pg.c @@ -504,6 +504,10 @@ Init_pg_ext(void) rb_define_const(rb_mPGconstants, "PGRES_COPY_BOTH", INT2FIX(PGRES_COPY_BOTH)); /* Result#result_status constant - Single tuple from larger resultset. */ rb_define_const(rb_mPGconstants, "PGRES_SINGLE_TUPLE", INT2FIX(PGRES_SINGLE_TUPLE)); +#ifdef HAVE_PQSETCHUNKEDROWSMODE + /* Result#result_status constant - tuple chunk from larger resultset. */ + rb_define_const(rb_mPGconstants, "PGRES_TUPLES_CHUNK", INT2FIX(PGRES_TUPLES_CHUNK)); +#endif #ifdef HAVE_PQENTERPIPELINEMODE /* Result#result_status constant - The PG::Result represents a synchronization point in pipeline mode, requested by Connection#pipeline_sync. diff --git a/ext/pg_connection.c b/ext/pg_connection.c index a3e90f6e9..3115fc6c7 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -1583,6 +1583,7 @@ pgconn_sync_describe_portal(VALUE self, VALUE stmt_name) * * +PGRES_FATAL_ERROR+ * * +PGRES_COPY_BOTH+ * * +PGRES_SINGLE_TUPLE+ + * * +PGRES_TUPLES_CHUNK+ * * +PGRES_PIPELINE_SYNC+ * * +PGRES_PIPELINE_ABORTED+ */ @@ -1807,14 +1808,11 @@ pgconn_escape_identifier(VALUE self, VALUE string) * (column names, types, etc) that an ordinary Result object for the query * would have. * - * *Caution:* While processing a query, the server may return some rows and - * then encounter an error, causing the query to be aborted. Ordinarily, pg - * discards any such rows and reports only the error. But in single-row mode, - * those rows will have already been returned to the application. Hence, the - * application will see some Result objects followed by an Error raised in get_result. - * For proper transactional behavior, the application must be designed to discard - * or undo whatever has been done with the previously-processed rows, if the query - * ultimately fails. + * *Caution:* While processing a query, the server may return some rows and then encounter an error, causing the query to be aborted. 
+ * Ordinarily, pg discards any such rows and reports only the error. + * But in single-row or chunked mode, some rows may have already been returned to the application. + * Hence, the application will see some PGRES_SINGLE_TUPLE or PGRES_TUPLES_CHUNK PG::Result objects followed by a PG::Error raised in get_result. + * For proper transactional behavior, the application must be designed to discard or undo whatever has been done with the previously-processed rows, if the query ultimately fails. * * Example: * conn.send_query( "your SQL command" ) @@ -1839,6 +1837,43 @@ pgconn_set_single_row_mode(VALUE self) return self; } +#ifdef HAVE_PQSETCHUNKEDROWSMODE +/* + * call-seq: + * conn.set_chunked_rows_mode -> self + * + * Select chunked mode for the currently-executing query. + * + * This function is similar to set_single_row_mode, except that it specifies retrieval of up to +chunk_size+ rows per PGresult, not necessarily just one row. + * This function can only be called immediately after send_query or one of its sibling functions, before any other operation on the connection such as consume_input or get_result. + * If called at the correct time, the function activates chunked mode for the current query. + * Otherwise the mode stays unchanged and the function raises an error. + * In any case, the mode reverts to normal after completion of the current query. + * + * Example: + * conn.send_query( "your SQL command" ) + * conn.set_chunked_rows_mode(10) + * loop do + * res = conn.get_result or break + * res.check + * res.each do |row| + * # do something with the received max. 
10 rows + * end + * end + */ +static VALUE +pgconn_set_chunked_rows_mode(VALUE self, VALUE chunk_size) +{ + PGconn *conn = pg_get_pgconn(self); + + rb_check_frozen(self); + if( PQsetChunkedRowsMode(conn, NUM2INT(chunk_size)) == 0 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + + return self; +} +#endif + static VALUE pgconn_send_query_params(int argc, VALUE *argv, VALUE self); /* @@ -4546,6 +4581,9 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "escape_bytea", pgconn_s_escape_bytea, 1); rb_define_method(rb_cPGconn, "unescape_bytea", pgconn_s_unescape_bytea, 1); rb_define_method(rb_cPGconn, "set_single_row_mode", pgconn_set_single_row_mode, 0); +#ifdef HAVE_PQSETCHUNKEDROWSMODE + rb_define_method(rb_cPGconn, "set_chunked_rows_mode", pgconn_set_chunked_rows_mode, 1); +#endif /****** PG::Connection INSTANCE METHODS: Asynchronous Command Processing ******/ rb_define_method(rb_cPGconn, "send_query", pgconn_send_query, -1); diff --git a/ext/pg_result.c b/ext/pg_result.c index 5f9d06725..4ea56c452 100644 --- a/ext/pg_result.c +++ b/ext/pg_result.c @@ -319,6 +319,9 @@ pg_result_check( VALUE self ) case PGRES_COMMAND_OK: #ifdef HAVE_PQENTERPIPELINEMODE case PGRES_PIPELINE_SYNC: +#endif +#ifdef HAVE_PQSETCHUNKEDROWSMODE + case PGRES_TUPLES_CHUNK: #endif return self; case PGRES_BAD_RESPONSE: @@ -545,6 +548,7 @@ static void pgresult_init_fnames(VALUE self) * * +PGRES_FATAL_ERROR+ * * +PGRES_COPY_BOTH+ * * +PGRES_SINGLE_TUPLE+ + * * +PGRES_TUPLES_CHUNK+ * * +PGRES_PIPELINE_SYNC+ * * +PGRES_PIPELINE_ABORTED+ * @@ -1521,6 +1525,9 @@ pgresult_stream_any(VALUE self, int (*yielder)(VALUE, int, int, void*), void* da return self; rb_raise( rb_eInvalidResultStatus, "PG::Result is not in single row mode"); case PGRES_SINGLE_TUPLE: +#ifdef HAVE_PQSETCHUNKEDROWSMODE + case PGRES_TUPLES_CHUNK: +#endif break; default: pg_result_check( self ); @@ -1565,7 +1572,7 @@ pgresult_stream_any(VALUE self, int (*yielder)(VALUE, int, int, void*), void* da * wrapping 
each row into a dedicated result object, it delivers data in nearly * the same speed as with ordinary results. * - * The base result must be in status PGRES_SINGLE_TUPLE. + * The base result must be in status PGRES_SINGLE_TUPLE or PGRES_TUPLES_CHUNK. * It iterates over all tuples until the status changes to PGRES_TUPLES_OK. * A PG::Error is raised for any errors from the server. * diff --git a/spec/helpers.rb b/spec/helpers.rb index b4cdcadc0..7214ec10a 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -702,6 +702,7 @@ def set_etc_hosts(hostaddr, hostname) config.filter_run_excluding( :postgresql_12 ) if PG.library_version < 120000 config.filter_run_excluding( :postgresql_14 ) if PG.library_version < 140000 + config.filter_run_excluding( :postgresql_17 ) if PG.library_version < 170000 config.filter_run_excluding( :unix_socket ) if RUBY_PLATFORM=~/mingw|mswin/i config.filter_run_excluding( :scheduler ) if RUBY_VERSION < "3.0" || (RUBY_PLATFORM =~ /mingw|mswin/ && RUBY_VERSION < "3.1") || !Fiber.respond_to?(:scheduler) config.filter_run_excluding( :scheduler_address_resolve ) if RUBY_VERSION < "3.1" diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index f0454aa8b..50050cbd7 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -2047,6 +2047,67 @@ def wait_check_socket(conn) end + describe "set_chunked_rows_mode", :postgresql_17 do + + it "raises an error when called at the wrong time" do + expect { + @conn.set_chunked_rows_mode(2) + }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } + end + + it "should work in single row mode" do + @conn.send_query( "SELECT generate_series(1,12)" ) + @conn.set_chunked_rows_mode(3) + + results = [] + loop do + @conn.block + res = @conn.get_result or break + results << res + end + expect( results.length ).to eq( 5 ) + results[0..-2].each do |res| + expect( res.result_status ).to eq( PG::PGRES_TUPLES_CHUNK ) + values = res.field_values('generate_series') + 
expect( values.length ).to eq( 3 ) + expect( values.first.to_i ).to be > 0 + end + expect( results.last.result_status ).to eq( PG::PGRES_TUPLES_OK ) + expect( results.last.ntuples ).to eq( 0 ) + end + + it "should receive rows before entire query is finished" do + @conn.send_query( "SELECT generate_series(0,999), NULL UNION ALL SELECT 1000, pg_sleep(10);" ) + @conn.set_chunked_rows_mode(4) + + start_time = Time.now + res = @conn.get_result + res.check + + expect( (Time.now - start_time) ).to be < 9 + expect( res.values ).to eq([["0", nil], ["1", nil], ["2", nil], ["3", nil]]) + @conn.cancel + end + + it "should receive rows before entire query fails" do + @conn.exec( "CREATE FUNCTION errfunc() RETURNS int AS $$ BEGIN RAISE 'test-error'; END; $$ LANGUAGE plpgsql;" ) + @conn.send_query( "SELECT generate_series(0,999), NULL UNION ALL SELECT 1000, errfunc();" ) + @conn.set_chunked_rows_mode(5) + + first_result = nil + expect do + loop do + res = @conn.get_result or break + res.check + first_result ||= res + end + end.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } + expect( first_result.kind_of?(PG::Result) ).to be_truthy + expect( first_result.result_status ).to eq( PG::PGRES_TUPLES_CHUNK ) + end + + end + context "pipeline mode", :postgresql_14 do describe "pipeline_status" do diff --git a/spec/pg/result_spec.rb b/spec/pg/result_spec.rb index 836b7699e..d34a07996 100644 --- a/spec/pg/result_spec.rb +++ b/spec/pg/result_spec.rb @@ -128,189 +128,191 @@ expect( res.each.to_a ).to eq [{:a=>'1', :b=>'2'}] end - context "result streaming in single row mode" do - let!(:textdec_int){ PG::TextDecoder::Integer.new name: 'INT4', oid: 23 } - - it "can iterate over all rows as Hash" do - @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) - @conn.set_single_row_mode - expect( - @conn.get_result.stream_each.to_a - ).to eq( - [{'a'=>"2"}, {'a'=>"3"}, {'a'=>"4"}] - ) - expect( - 
@conn.get_result.enum_for(:stream_each).to_a - ).to eq( - [{'b'=>"1", 'c'=>"5"}, {'b'=>"1", 'c'=>"6"}] - ) - expect( @conn.get_result ).to be_nil - end - - it "can iterate over all rows as Hash with symbols and typemap" do - @conn.send_query( "SELECT generate_series(2,4) AS a" ) - @conn.set_single_row_mode - res = @conn.get_result.field_names_as(:symbol) - res.type_map = PG::TypeMapByColumn.new [textdec_int] - expect( - res.stream_each.to_a - ).to eq( - [{:a=>2}, {:a=>3}, {:a=>4}] - ) - expect( @conn.get_result ).to be_nil - end - - it "keeps last result on error while iterating stream_each" do - @conn.send_query( "SELECT generate_series(2,4) AS a" ) - @conn.set_single_row_mode - res = @conn.get_result - expect do - res.stream_each_row do - raise ZeroDivisionError - end - end.to raise_error(ZeroDivisionError) - expect( res.values ).to eq([["2"]]) - end - - it "can iterate over all rows as Array" do - @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) - @conn.set_single_row_mode - expect( - @conn.get_result.enum_for(:stream_each_row).to_a - ).to eq( - [["2"], ["3"], ["4"]] - ) - expect( - @conn.get_result.stream_each_row.to_a - ).to eq( - [["1", "5"], ["1", "6"]] - ) - expect( @conn.get_result ).to be_nil - end - - it "keeps last result on error while iterating stream_each_row" do - @conn.send_query( "SELECT generate_series(2,4) AS a" ) - @conn.set_single_row_mode - res = @conn.get_result - expect do - res.stream_each_row do - raise ZeroDivisionError - end - end.to raise_error(ZeroDivisionError) - expect( res.values ).to eq([["2"]]) - end - - it "can iterate over all rows as PG::Tuple" do - @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) - @conn.set_single_row_mode - tuples = @conn.get_result.stream_each_tuple.to_a - expect( tuples[0][0] ).to eq( "2" ) - expect( tuples[1]["a"] ).to eq( "3" ) - expect( tuples.size ).to eq( 3 ) - - tuples = 
@conn.get_result.enum_for(:stream_each_tuple).to_a - expect( tuples[-1][-1] ).to eq( "6" ) - expect( tuples[-2]["b"] ).to eq( "1" ) - expect( tuples.size ).to eq( 2 ) - - expect( @conn.get_result ).to be_nil - end - - it "clears result on error while iterating stream_each_tuple" do - @conn.send_query( "SELECT generate_series(2,4) AS a" ) - @conn.set_single_row_mode - res = @conn.get_result - expect do - res.stream_each_tuple do - raise ZeroDivisionError - end - end.to raise_error(ZeroDivisionError) - expect( res.cleared? ).to eq(true) - end - - it "should reuse field names in stream_each_tuple" do - @conn.send_query( "SELECT generate_series(2,3) AS a" ) - @conn.set_single_row_mode - tuple1, tuple2 = *@conn.get_result.stream_each_tuple.to_a - expect( tuple1.keys[0].object_id ).to eq(tuple2.keys[0].object_id) - end - - it "can iterate over all rows as PG::Tuple with symbols and typemap" do - @conn.send_query( "SELECT generate_series(2,4) AS a" ) - @conn.set_single_row_mode - res = @conn.get_result.field_names_as(:symbol) - res.type_map = PG::TypeMapByColumn.new [textdec_int] - tuples = res.stream_each_tuple.to_a - expect( tuples[0][0] ).to eq( 2 ) - expect( tuples[1][:a] ).to eq( 3 ) - expect( @conn.get_result ).to be_nil - end - - it "can handle commands not returning tuples" do - @conn.send_query( "CREATE TEMP TABLE test_single_row_mode (a int)" ) - @conn.set_single_row_mode - res1 = @conn.get_result - res2 = res1.stream_each_tuple { raise "this shouldn't be called" } - expect( res2 ).to be_equal( res1 ) - expect( @conn.get_result ).to be_nil - @conn.exec( "DROP TABLE test_single_row_mode" ) - end - - it "complains when not in single row mode" do - @conn.send_query( "SELECT generate_series(2,4)" ) - expect{ - @conn.get_result.stream_each_row.to_a - }.to raise_error(PG::InvalidResultStatus, /not in single row mode/) - end - - it "complains when intersected with get_result" do - @conn.send_query( "SELECT 1" ) - @conn.set_single_row_mode - expect{ - 
@conn.get_result.stream_each_row.each{ @conn.get_result } - }.to raise_error(PG::NoResultError, /no result received/) - end - - it "raises server errors" do - @conn.send_query( "SELECT 0/0" ) - expect{ - @conn.get_result.stream_each_row.to_a - }.to raise_error(PG::DivisionByZero) - end - - it "raises an error if result number of fields change" do - @conn.send_query( "SELECT 1" ) - @conn.set_single_row_mode - res = @conn.get_result - expect{ - res.stream_each_row do - @conn.discard_results - @conn.send_query("SELECT 2,3"); - @conn.set_single_row_mode - end - }.to raise_error(PG::InvalidChangeOfResultFields, /from 1 to 2 /) - expect( res.cleared? ).to be true - end - - it "raises an error if there is a timeout during streaming" do - @conn.exec( "SET local statement_timeout = 20" ) - - @conn.send_query( "SELECT 1, true UNION ALL SELECT 2, (pg_sleep(0.1) IS NULL)" ) - @conn.set_single_row_mode - expect{ - @conn.get_result.stream_each_row do |row| - # No-op - end - }.to raise_error(PG::QueryCanceled, /statement timeout/) - end - - it "should deny streaming when frozen" do - @conn.send_query( "SELECT 1" ) - @conn.set_single_row_mode - res = @conn.get_result.freeze - expect{ - res.stream_each_row - }.to raise_error(FrozenError) + [[:single, nil, [:set_single_row_mode]], [:chunked, :postgresql_17, [:set_chunked_rows_mode, 3]]].each do |mode_name, guard, row_mode| + context "result streaming in #{mode_name} row mode", guard do + let!(:textdec_int){ PG::TextDecoder::Integer.new name: 'INT4', oid: 23 } + + it "can iterate over all rows as Hash" do + @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) + @conn.send(*row_mode) + expect( + @conn.get_result.stream_each.to_a + ).to eq( + [{'a'=>"2"}, {'a'=>"3"}, {'a'=>"4"}] + ) + expect( + @conn.get_result.enum_for(:stream_each).to_a + ).to eq( + [{'b'=>"1", 'c'=>"5"}, {'b'=>"1", 'c'=>"6"}] + ) + expect( @conn.get_result ).to be_nil + end + + it "can iterate over all rows as Hash with 
symbols and typemap" do + @conn.send_query( "SELECT generate_series(2,4) AS a" ) + @conn.send(*row_mode) + res = @conn.get_result.field_names_as(:symbol) + res.type_map = PG::TypeMapByColumn.new [textdec_int] + expect( + res.stream_each.to_a + ).to eq( + [{:a=>2}, {:a=>3}, {:a=>4}] + ) + expect( @conn.get_result ).to be_nil + end + + it "keeps last result on error while iterating stream_each" do + @conn.send_query( "SELECT generate_series(2,6) AS a" ) + @conn.send(*row_mode) + res = @conn.get_result + expect do + res.stream_each_row do + raise ZeroDivisionError + end + end.to raise_error(ZeroDivisionError) + expect( res.values ).to eq(mode_name==:single ? [["2"]] : [["2"], ["3"], ["4"]]) + end + + it "can iterate over all rows as Array" do + @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) + @conn.send(*row_mode) + expect( + @conn.get_result.enum_for(:stream_each_row).to_a + ).to eq( + [["2"], ["3"], ["4"]] + ) + expect( + @conn.get_result.stream_each_row.to_a + ).to eq( + [["1", "5"], ["1", "6"]] + ) + expect( @conn.get_result ).to be_nil + end + + it "keeps last result on error while iterating stream_each_row" do + @conn.send_query( "SELECT generate_series(2,6) AS a" ) + @conn.send(*row_mode) + res = @conn.get_result + expect do + res.stream_each_row do + raise ZeroDivisionError + end + end.to raise_error(ZeroDivisionError) + expect( res.values ).to eq(mode_name==:single ? 
[["2"]] : [["2"], ["3"], ["4"]]) + end + + it "can iterate over all rows as PG::Tuple" do + @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) + @conn.send(*row_mode) + tuples = @conn.get_result.stream_each_tuple.to_a + expect( tuples[0][0] ).to eq( "2" ) + expect( tuples[1]["a"] ).to eq( "3" ) + expect( tuples.size ).to eq( 3 ) + + tuples = @conn.get_result.enum_for(:stream_each_tuple).to_a + expect( tuples[-1][-1] ).to eq( "6" ) + expect( tuples[-2]["b"] ).to eq( "1" ) + expect( tuples.size ).to eq( 2 ) + + expect( @conn.get_result ).to be_nil + end + + it "clears result on error while iterating stream_each_tuple" do + @conn.send_query( "SELECT generate_series(2,4) AS a" ) + @conn.send(*row_mode) + res = @conn.get_result + expect do + res.stream_each_tuple do + raise ZeroDivisionError + end + end.to raise_error(ZeroDivisionError) + expect( res.cleared? ).to eq(true) + end + + it "should reuse field names in stream_each_tuple" do + @conn.send_query( "SELECT generate_series(2,3) AS a" ) + @conn.send(*row_mode) + tuple1, tuple2 = *@conn.get_result.stream_each_tuple.to_a + expect( tuple1.keys[0].object_id ).to eq(tuple2.keys[0].object_id) + end + + it "can iterate over all rows as PG::Tuple with symbols and typemap" do + @conn.send_query( "SELECT generate_series(2,4) AS a" ) + @conn.send(*row_mode) + res = @conn.get_result.field_names_as(:symbol) + res.type_map = PG::TypeMapByColumn.new [textdec_int] + tuples = res.stream_each_tuple.to_a + expect( tuples[0][0] ).to eq( 2 ) + expect( tuples[1][:a] ).to eq( 3 ) + expect( @conn.get_result ).to be_nil + end + + it "can handle commands not returning tuples" do + @conn.send_query( "CREATE TEMP TABLE test_single_row_mode (a int)" ) + @conn.send(*row_mode) + res1 = @conn.get_result + res2 = res1.stream_each_tuple { raise "this shouldn't be called" } + expect( res2 ).to be_equal( res1 ) + expect( @conn.get_result ).to be_nil + @conn.exec( "DROP TABLE test_single_row_mode" ) + end 
+ + it "complains when not in single row mode" do + @conn.send_query( "SELECT generate_series(2,4)" ) + expect{ + @conn.get_result.stream_each_row.to_a + }.to raise_error(PG::InvalidResultStatus, /not in single row mode/) + end + + it "complains when intersected with get_result" do + @conn.send_query( "SELECT 1" ) + @conn.send(*row_mode) + expect{ + @conn.get_result.stream_each_row.each{ @conn.get_result } + }.to raise_error(PG::NoResultError, /no result received/) + end + + it "raises server errors" do + @conn.send_query( "SELECT 0/0" ) + expect{ + @conn.get_result.stream_each_row.to_a + }.to raise_error(PG::DivisionByZero) + end + + it "raises an error if result number of fields change" do + @conn.send_query( "SELECT 1" ) + @conn.send(*row_mode) + res = @conn.get_result + expect{ + res.stream_each_row do + @conn.discard_results + @conn.send_query("SELECT 2,3"); + @conn.send(*row_mode) + end + }.to raise_error(PG::InvalidChangeOfResultFields, /from 1 to 2 /) + expect( res.cleared? ).to be true + end + + it "raises an error if there is a timeout during streaming" do + @conn.exec( "SET local statement_timeout = 20" ) + + @conn.send_query( "SELECT 1, true UNION ALL SELECT 2, (pg_sleep(0.1) IS NULL)" ) + @conn.send(*row_mode) + expect{ + @conn.get_result.stream_each_row do |row| + # No-op + end + }.to raise_error(PG::QueryCanceled, /statement timeout/) + end + + it "should deny streaming when frozen" do + @conn.send_query( "SELECT 1" ) + @conn.send(*row_mode) + res = @conn.get_result.freeze + expect{ + res.stream_each_row + }.to raise_error(FrozenError) + end end end From 269116d4654f634fb51c699b9aa5a3c7a0de24f9 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 24 Nov 2024 11:29:00 +0100 Subject: [PATCH 023/118] Improve error message of single_row_mode and chunked_rows_mode --- ext/pg_connection.c | 6 ++++-- spec/pg/connection_spec.rb | 10 ++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 
3115fc6c7..5276844cc 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -1832,7 +1832,7 @@ pgconn_set_single_row_mode(VALUE self) rb_check_frozen(self); if( PQsetSingleRowMode(conn) == 0 ) - pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "PQsetSingleRowMode %s", PQerrorMessage(conn)); return self; } @@ -1860,6 +1860,8 @@ pgconn_set_single_row_mode(VALUE self) * # do something with the received max. 10 rows * end * end + * + * Available since PostgreSQL-17 */ static VALUE pgconn_set_chunked_rows_mode(VALUE self, VALUE chunk_size) @@ -1868,7 +1870,7 @@ pgconn_set_chunked_rows_mode(VALUE self, VALUE chunk_size) rb_check_frozen(self); if( PQsetChunkedRowsMode(conn, NUM2INT(chunk_size)) == 0 ) - pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + pg_raise_conn_error( rb_ePGerror, self, "PQsetChunkedRowsMode %s", PQerrorMessage(conn)); return self; } diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 23b48c710..c21d3a8b6 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -1988,7 +1988,7 @@ def wait_check_socket(conn) it "raises an error when called at the wrong time" do expect { @conn.set_single_row_mode - }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } + }.to raise_error(PG::Error, /PQsetSingleRowMode/){|err| expect(err).to have_attributes(connection: @conn) } end it "should work in single row mode" do @@ -2049,7 +2049,13 @@ def wait_check_socket(conn) it "raises an error when called at the wrong time" do expect { @conn.set_chunked_rows_mode(2) - }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } + }.to raise_error(PG::Error, /PQsetChunkedRowsMode/){|err| expect(err).to have_attributes(connection: @conn) } + end + + it "raises an error when called with wrong arguments" do + expect { @conn.set_chunked_rows_mode(:nonint) }.to raise_error(TypeError) + expect { 
@conn.set_chunked_rows_mode(0) }.to raise_error(PG::Error, /PQsetChunkedRowsMode/) + expect { @conn.set_chunked_rows_mode(-2) }.to raise_error(PG::Error) end it "should work in single row mode" do From be42490f12dca8ae4f063becd632a0a956c64c55 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 24 Nov 2024 12:48:41 +0100 Subject: [PATCH 024/118] Add PG::Connection#close_prepared and siblings which are new in PostgreSQL-17 --- ext/gvl_wrappers.c | 8 ++ ext/gvl_wrappers.h | 16 ++++ ext/pg_connection.c | 167 ++++++++++++++++++++++++++++++++++++- lib/pg/connection.rb | 6 ++ spec/pg/connection_spec.rb | 17 ++++ 5 files changed, 213 insertions(+), 1 deletion(-) diff --git a/ext/gvl_wrappers.c b/ext/gvl_wrappers.c index 3a1dae882..db9357077 100644 --- a/ext/gvl_wrappers.c +++ b/ext/gvl_wrappers.c @@ -5,6 +5,14 @@ #include "pg.h" + +#ifndef HAVE_PQSETCHUNKEDROWSMODE +PGresult *PQclosePrepared(PGconn *conn, const char *stmtName){return NULL;} +PGresult *PQclosePortal(PGconn *conn, const char *portalName){return NULL;} +int PQsendClosePrepared(PGconn *conn, const char *stmtName){return 0;} +int PQsendClosePortal(PGconn *conn, const char *portalName){return 0;} +#endif + #ifdef ENABLE_GVL_UNLOCK FOR_EACH_BLOCKING_FUNCTION( DEFINE_GVL_WRAPPER_STRUCT ); FOR_EACH_BLOCKING_FUNCTION( DEFINE_GVL_SKELETON ); diff --git a/ext/gvl_wrappers.h b/ext/gvl_wrappers.h index ce226c8b4..0c2c5b848 100644 --- a/ext/gvl_wrappers.h +++ b/ext/gvl_wrappers.h @@ -149,6 +149,12 @@ #define FOR_EACH_PARAM_OF_PQdescribePortal(param) \ param(PGconn *, conn) +#define FOR_EACH_PARAM_OF_PQclosePrepared(param) \ + param(PGconn *, conn) + +#define FOR_EACH_PARAM_OF_PQclosePortal(param) \ + param(PGconn *, conn) + #define FOR_EACH_PARAM_OF_PQgetResult(param) #define FOR_EACH_PARAM_OF_PQputCopyData(param) \ @@ -196,6 +202,12 @@ #define FOR_EACH_PARAM_OF_PQsendDescribePortal(param) \ param(PGconn *, conn) +#define FOR_EACH_PARAM_OF_PQsendClosePrepared(param) \ + param(PGconn *, conn) + +#define 
FOR_EACH_PARAM_OF_PQsendClosePortal(param) \ + param(PGconn *, conn) + #define FOR_EACH_PARAM_OF_PQsetClientEncoding(param) \ param(PGconn *, conn) @@ -225,6 +237,8 @@ function(PQprepare, GVL_TYPE_NONVOID, PGresult *, const Oid *, paramTypes) \ function(PQdescribePrepared, GVL_TYPE_NONVOID, PGresult *, const char *, stmtName) \ function(PQdescribePortal, GVL_TYPE_NONVOID, PGresult *, const char *, portalName) \ + function(PQclosePrepared, GVL_TYPE_NONVOID, PGresult *, const char *, stmtName) \ + function(PQclosePortal, GVL_TYPE_NONVOID, PGresult *, const char *, portalName) \ function(PQgetResult, GVL_TYPE_NONVOID, PGresult *, PGconn *, conn) \ function(PQputCopyData, GVL_TYPE_NONVOID, int, int, nbytes) \ function(PQputCopyEnd, GVL_TYPE_NONVOID, int, const char *, errormsg) \ @@ -236,6 +250,8 @@ function(PQsendQueryPrepared, GVL_TYPE_NONVOID, int, int, resultFormat) \ function(PQsendDescribePrepared, GVL_TYPE_NONVOID, int, const char *, stmt) \ function(PQsendDescribePortal, GVL_TYPE_NONVOID, int, const char *, portal) \ + function(PQsendClosePrepared, GVL_TYPE_NONVOID, int, const char *, stmt) \ + function(PQsendClosePortal, GVL_TYPE_NONVOID, int, const char *, portal) \ function(PQsetClientEncoding, GVL_TYPE_NONVOID, int, const char *, encoding) \ function(PQisBusy, GVL_TYPE_NONVOID, int, PGconn *, conn) \ function(PQencryptPasswordConn, GVL_TYPE_NONVOID, char *, const char *, algorithm) \ diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 5276844cc..b9564efd6 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -1567,6 +1567,54 @@ pgconn_sync_describe_portal(VALUE self, VALUE stmt_name) } +#ifdef HAVE_PQSETCHUNKEDROWSMODE +/* + * call-seq: + * conn.sync_close_prepared( stmt_name ) -> PG::Result + * + * This function has the same behavior as #async_close_prepared, but is implemented using the synchronous command processing API of libpq. + * See #async_exec for the differences between the two API variants. 
+ * It's not recommended to use explicit sync or async variants but #close_prepared instead, unless you have a good reason to do so. + * + * Available since PostgreSQL-17. + */ +static VALUE +pgconn_sync_close_prepared(VALUE self, VALUE stmt_name) +{ + PGresult *result; + VALUE rb_pgresult; + t_pg_connection *this = pg_get_connection_safe( self ); + const char *stmt = NIL_P(stmt_name) ? NULL : pg_cstr_enc(stmt_name, this->enc_idx); + result = gvl_PQclosePrepared(this->pgconn, stmt); + rb_pgresult = pg_new_result(result, self); + pg_result_check(rb_pgresult); + return rb_pgresult; +} + +/* + * call-seq: + * conn.sync_close_portal( portal_name ) -> PG::Result + * + * This function has the same behavior as #async_close_portal, but is implemented using the synchronous command processing API of libpq. + * See #async_exec for the differences between the two API variants. + * It's not recommended to use explicit sync or async variants but #close_portal instead, unless you have a good reason to do so. + * + * Available since PostgreSQL-17. + */ +static VALUE +pgconn_sync_close_portal(VALUE self, VALUE stmt_name) +{ + PGresult *result; + VALUE rb_pgresult; + t_pg_connection *this = pg_get_connection_safe( self ); + const char *stmt = NIL_P(stmt_name) ? 
NULL : pg_cstr_enc(stmt_name, this->enc_idx); + result = gvl_PQclosePortal(this->pgconn, stmt); + rb_pgresult = pg_new_result(result, self); + pg_result_check(rb_pgresult); + return rb_pgresult; +} +#endif + /* * call-seq: * conn.make_empty_pgresult( status ) -> PG::Result @@ -2140,12 +2188,56 @@ pgconn_send_describe_portal(VALUE self, VALUE portal) t_pg_connection *this = pg_get_connection_safe( self ); /* returns 0 on failure */ if(gvl_PQsendDescribePortal(this->pgconn, pg_cstr_enc(portal, this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendDescribePortal %s", PQerrorMessage(this->pgconn)); pgconn_wait_for_flush( self ); return Qnil; } +#ifdef HAVE_PQSETCHUNKEDROWSMODE +/* + * call-seq: + * conn.send_close_prepared( statement_name ) -> nil + * + * Asynchronously send _command_ to the server. Does not block. + * Use in combination with +conn.get_result+. + * + * Available since PostgreSQL-17. + */ +static VALUE +pgconn_send_close_prepared(VALUE self, VALUE stmt_name) +{ + t_pg_connection *this = pg_get_connection_safe( self ); + /* returns 0 on failure */ + if(gvl_PQsendClosePrepared(this->pgconn, pg_cstr_enc(stmt_name, this->enc_idx)) == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendClosePrepared %s", PQerrorMessage(this->pgconn)); + + pgconn_wait_for_flush( self ); + return Qnil; +} + + +/* + * call-seq: + * conn.send_close_portal( portal_name ) -> nil + * + * Asynchronously send _command_ to the server. Does not block. + * Use in combination with +conn.get_result+. + * + * Available since PostgreSQL-17. 
+ */ +static VALUE +pgconn_send_close_portal(VALUE self, VALUE portal) +{ + t_pg_connection *this = pg_get_connection_safe( self ); + /* returns 0 on failure */ + if(gvl_PQsendClosePortal(this->pgconn, pg_cstr_enc(portal, this->enc_idx)) == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendClosePortal %s", PQerrorMessage(this->pgconn)); + + pgconn_wait_for_flush( self ); + return Qnil; +} +#endif static VALUE pgconn_sync_get_result(VALUE self) @@ -3536,6 +3628,67 @@ pgconn_async_describe_prepared(VALUE self, VALUE stmt_name) return rb_pgresult; } +#ifdef HAVE_PQSETCHUNKEDROWSMODE +/* + * call-seq: + * conn.close_prepared( statement_name ) -> PG::Result + * + * Submits a request to close the specified prepared statement, and waits for completion. + * close_prepared allows an application to close a previously prepared statement. + * Closing a statement releases all of its associated resources on the server and allows its name to be reused. + * + * statement_name can be "" or +nil+ to reference the unnamed statement. + * It is fine if no statement exists with this name, in that case the operation is a no-op. + * On success, a PG::Result with status PGRES_COMMAND_OK is returned. + * + * See also corresponding {libpq function}[https://www.postgresql.org/docs/current/libpq-exec.html#LIBPQ-PQCLOSEPREPARED]. + * + * Available since PostgreSQL-17. + */ +static VALUE +pgconn_async_close_prepared(VALUE self, VALUE stmt_name) +{ + VALUE rb_pgresult = Qnil; + + pgconn_discard_results( self ); + pgconn_send_close_prepared( self, stmt_name ); + rb_pgresult = pgconn_async_get_last_result( self ); + + if ( rb_block_given_p() ) { + return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); + } + return rb_pgresult; +} + +/* + * call-seq: + * conn.close_portal( portal_name ) -> PG::Result + * + * Submits a request to close the specified portal, and waits for completion. + * + * close_portal allows an application to trigger a close of a previously created portal. 
+ * Closing a portal releases all of its associated resources on the server and allows its name to be reused. + * (pg does not provide any direct access to portals, but you can use this function to close a cursor created with a DECLARE CURSOR SQL command.) + * + * See also corresponding {libpq function}[https://www.postgresql.org/docs/current/libpq-exec.html#LIBPQ-PQCLOSEPORTAL]. + * + * Available since PostgreSQL-17. + */ +static VALUE +pgconn_async_close_portal(VALUE self, VALUE portal) +{ + VALUE rb_pgresult = Qnil; + + pgconn_discard_results( self ); + pgconn_send_close_portal( self, portal ); + rb_pgresult = pgconn_async_get_last_result( self ); + + if ( rb_block_given_p() ) { + return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); + } + return rb_pgresult; +} +#endif /* * call-seq: @@ -4559,6 +4712,10 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "sync_exec_prepared", pgconn_sync_exec_prepared, -1); rb_define_method(rb_cPGconn, "sync_describe_prepared", pgconn_sync_describe_prepared, 1); rb_define_method(rb_cPGconn, "sync_describe_portal", pgconn_sync_describe_portal, 1); +#ifdef HAVE_PQSETCHUNKEDROWSMODE + rb_define_method(rb_cPGconn, "sync_close_prepared", pgconn_sync_close_prepared, 1); + rb_define_method(rb_cPGconn, "sync_close_portal", pgconn_sync_close_portal, 1); +#endif rb_define_method(rb_cPGconn, "exec", pgconn_async_exec, -1); rb_define_method(rb_cPGconn, "exec_params", pgconn_async_exec_params, -1); @@ -4566,6 +4723,10 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "exec_prepared", pgconn_async_exec_prepared, -1); rb_define_method(rb_cPGconn, "describe_prepared", pgconn_async_describe_prepared, 1); rb_define_method(rb_cPGconn, "describe_portal", pgconn_async_describe_portal, 1); +#ifdef HAVE_PQSETCHUNKEDROWSMODE + rb_define_method(rb_cPGconn, "close_prepared", pgconn_async_close_prepared, 1); + rb_define_method(rb_cPGconn, "close_portal", pgconn_async_close_portal, 1); +#endif rb_define_alias(rb_cPGconn, 
"async_exec", "exec"); rb_define_alias(rb_cPGconn, "async_query", "async_exec"); @@ -4574,6 +4735,10 @@ init_pg_connection(void) rb_define_alias(rb_cPGconn, "async_exec_prepared", "exec_prepared"); rb_define_alias(rb_cPGconn, "async_describe_prepared", "describe_prepared"); rb_define_alias(rb_cPGconn, "async_describe_portal", "describe_portal"); +#ifdef HAVE_PQSETCHUNKEDROWSMODE + rb_define_alias(rb_cPGconn, "async_close_prepared", "close_prepared"); + rb_define_alias(rb_cPGconn, "async_close_portal", "close_portal"); +#endif rb_define_method(rb_cPGconn, "make_empty_pgresult", pgconn_make_empty_pgresult, 1); rb_define_method(rb_cPGconn, "escape_string", pgconn_s_escape, 1); diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index c0fd1b2a1..04613f103 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -921,6 +921,12 @@ def ping(*args) :encrypt_password => [:async_encrypt_password, :sync_encrypt_password], } private_constant :REDIRECT_METHODS + if PG::Connection.instance_methods.include? 
:async_close_prepared + REDIRECT_METHODS.merge!({ + :close_prepared => [:async_close_prepared, :sync_close_prepared], + :close_portal => [:async_close_portal, :sync_close_portal], + }) + end PG.make_shareable(REDIRECT_METHODS) def async_send_api=(enable) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index c21d3a8b6..bdaf597d5 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -2369,6 +2369,7 @@ def wait_check_socket(conn) @conn.prepare("weiß2", "VALUES(123)") r = @conn.describe_prepared("weiß2".encode("utf-16be")) expect( r.nfields ).to eq( 1 ) + expect { @conn.prepare("weiß2", "VALUES(123)") }.to raise_error(PG::DuplicatePstatement) end it "should convert strings to #describe_portal" do @@ -2377,6 +2378,22 @@ def wait_check_socket(conn) expect( r.nfields ).to eq( 3 ) end + it "should convert strings to #close_prepared", :postgresql_17 do + @conn.prepare("weiß5", "VALUES(123)") + r = @conn.close_prepared("weiß5".encode("utf-16be")) + expect( r.nfields ).to eq( 0 ) + @conn.prepare("weiß5", "VALUES(123)") + r = @conn.close_prepared("weiß5".encode("utf-16be")) + end + + it "should convert strings to #close_portal", :postgresql_17 do + @conn.exec "DECLARE cörsör5 CURSOR FOR VALUES(1,2,3)" + r = @conn.close_portal("cörsör5".encode("utf-16le")) + expect( r.nfields ).to eq( 0 ) + @conn.exec "DECLARE cörsör5 CURSOR FOR VALUES(1,2,3)" + r = @conn.close_portal("cörsör5".encode("utf-16le")) + end + it "should convert query string to #send_query" do @conn.send_query("VALUES('grün')".encode("utf-16be")) expect( @conn.get_last_result.values ).to eq( [['grün']] ) From 480668f2eed3d84f9508a351d022247bbf8da4b1 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 24 Nov 2024 12:50:55 +0100 Subject: [PATCH 025/118] Add function name to errors raised by PQsend... These functions don't emit an error by PQerrorMessage() so far. To make the error more useful, this adds at least the failing function as error message. 
--- ext/pg_connection.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index b9564efd6..5dab3c031 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -1947,7 +1947,7 @@ pgconn_send_query(int argc, VALUE *argv, VALUE self) /* If called with no or nil parameters, use PQexec for compatibility */ if ( argc == 1 || (argc >= 2 && argc <= 4 && NIL_P(argv[1]) )) { if(gvl_PQsendQuery(this->pgconn, pg_cstr_enc(argv[0], this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendQuery %s", PQerrorMessage(this->pgconn)); pgconn_wait_for_flush( self ); return Qnil; @@ -2022,7 +2022,7 @@ pgconn_send_query_params(int argc, VALUE *argv, VALUE self) free_query_params( ¶msData ); if(result == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendQueryParams %s", PQerrorMessage(this->pgconn)); pgconn_wait_for_flush( self ); return Qnil; @@ -2083,7 +2083,7 @@ pgconn_send_prepare(int argc, VALUE *argv, VALUE self) xfree(paramTypes); if(result == 0) { - pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendPrepare %s", PQerrorMessage(this->pgconn)); } pgconn_wait_for_flush( self ); return Qnil; @@ -2149,7 +2149,7 @@ pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) free_query_params( ¶msData ); if(result == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendQueryPrepared %s", PQerrorMessage(this->pgconn)); pgconn_wait_for_flush( self ); return Qnil; @@ -2168,7 +2168,7 @@ pgconn_send_describe_prepared(VALUE self, VALUE stmt_name) t_pg_connection *this = pg_get_connection_safe( self ); /* returns 0 on failure */ 
if(gvl_PQsendDescribePrepared(this->pgconn, pg_cstr_enc(stmt_name, this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "%s", PQerrorMessage(this->pgconn)); + pg_raise_conn_error( rb_eUnableToSend, self, "PQsendDescribePrepared %s", PQerrorMessage(this->pgconn)); pgconn_wait_for_flush( self ); return Qnil; From 5d660c28072f5cbfac53f078cfde96fa4151cde9 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 24 Nov 2024 20:18:04 +0100 Subject: [PATCH 026/118] Remove duplicated C code --- ext/pg_connection.c | 174 ++++++++++++++++---------------------------- 1 file changed, 62 insertions(+), 112 deletions(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 5dab3c031..ddaeaa744 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -1511,6 +1511,19 @@ pgconn_sync_exec_prepared(int argc, VALUE *argv, VALUE self) return rb_pgresult; } +static VALUE +pgconn_sync_describe_close_prepared_portal(VALUE self, VALUE name, PGresult *(*func)(PGconn *, const char *)) +{ + PGresult *result; + VALUE rb_pgresult; + t_pg_connection *this = pg_get_connection_safe( self ); + const char *stmt = NIL_P(name) ? 
NULL : pg_cstr_enc(name, this->enc_idx); + result = func(this->pgconn, stmt); + rb_pgresult = pg_new_result(result, self); + pg_result_check(rb_pgresult); + return rb_pgresult; +} + /* * call-seq: * conn.sync_describe_prepared( statement_name ) -> PG::Result @@ -1522,20 +1535,7 @@ pgconn_sync_exec_prepared(int argc, VALUE *argv, VALUE self) static VALUE pgconn_sync_describe_prepared(VALUE self, VALUE stmt_name) { - PGresult *result; - VALUE rb_pgresult; - t_pg_connection *this = pg_get_connection_safe( self ); - const char *stmt; - if(NIL_P(stmt_name)) { - stmt = NULL; - } - else { - stmt = pg_cstr_enc(stmt_name, this->enc_idx); - } - result = gvl_PQdescribePrepared(this->pgconn, stmt); - rb_pgresult = pg_new_result(result, self); - pg_result_check(rb_pgresult); - return rb_pgresult; + return pgconn_sync_describe_close_prepared_portal(self, stmt_name, gvl_PQdescribePrepared); } @@ -1550,20 +1550,7 @@ pgconn_sync_describe_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_sync_describe_portal(VALUE self, VALUE stmt_name) { - PGresult *result; - VALUE rb_pgresult; - t_pg_connection *this = pg_get_connection_safe( self ); - const char *stmt; - if(NIL_P(stmt_name)) { - stmt = NULL; - } - else { - stmt = pg_cstr_enc(stmt_name, this->enc_idx); - } - result = gvl_PQdescribePortal(this->pgconn, stmt); - rb_pgresult = pg_new_result(result, self); - pg_result_check(rb_pgresult); - return rb_pgresult; + return pgconn_sync_describe_close_prepared_portal(self, stmt_name, gvl_PQdescribePortal); } @@ -1581,14 +1568,7 @@ pgconn_sync_describe_portal(VALUE self, VALUE stmt_name) static VALUE pgconn_sync_close_prepared(VALUE self, VALUE stmt_name) { - PGresult *result; - VALUE rb_pgresult; - t_pg_connection *this = pg_get_connection_safe( self ); - const char *stmt = NIL_P(stmt_name) ? 
NULL : pg_cstr_enc(stmt_name, this->enc_idx); - result = gvl_PQclosePrepared(this->pgconn, stmt); - rb_pgresult = pg_new_result(result, self); - pg_result_check(rb_pgresult); - return rb_pgresult; + return pgconn_sync_describe_close_prepared_portal(self, stmt_name, gvl_PQclosePrepared); } /* @@ -1604,14 +1584,7 @@ pgconn_sync_close_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_sync_close_portal(VALUE self, VALUE stmt_name) { - PGresult *result; - VALUE rb_pgresult; - t_pg_connection *this = pg_get_connection_safe( self ); - const char *stmt = NIL_P(stmt_name) ? NULL : pg_cstr_enc(stmt_name, this->enc_idx); - result = gvl_PQclosePortal(this->pgconn, stmt); - rb_pgresult = pg_new_result(result, self); - pg_result_check(rb_pgresult); - return rb_pgresult; + return pgconn_sync_describe_close_prepared_portal(self, stmt_name, gvl_PQclosePortal); } #endif @@ -2155,6 +2128,20 @@ pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) return Qnil; } + +static VALUE +pgconn_send_describe_close_prepared_portal(VALUE self, VALUE name, int (*func)(PGconn *, const char *), const char *funame) +{ + t_pg_connection *this = pg_get_connection_safe( self ); + const char *stmt = NIL_P(name) ? 
NULL : pg_cstr_enc(name, this->enc_idx); + /* returns 0 on failure */ + if(func(this->pgconn, stmt) == 0) + pg_raise_conn_error( rb_eUnableToSend, self, "%s %s", funame, PQerrorMessage(this->pgconn)); + + pgconn_wait_for_flush( self ); + return Qnil; +} + /* * call-seq: * conn.send_describe_prepared( statement_name ) -> nil @@ -2165,13 +2152,9 @@ pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) static VALUE pgconn_send_describe_prepared(VALUE self, VALUE stmt_name) { - t_pg_connection *this = pg_get_connection_safe( self ); - /* returns 0 on failure */ - if(gvl_PQsendDescribePrepared(this->pgconn, pg_cstr_enc(stmt_name, this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "PQsendDescribePrepared %s", PQerrorMessage(this->pgconn)); - - pgconn_wait_for_flush( self ); - return Qnil; + return pgconn_send_describe_close_prepared_portal( + self, stmt_name, gvl_PQsendDescribePrepared, + "PQsendDescribePrepared"); } @@ -2185,13 +2168,9 @@ pgconn_send_describe_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_send_describe_portal(VALUE self, VALUE portal) { - t_pg_connection *this = pg_get_connection_safe( self ); - /* returns 0 on failure */ - if(gvl_PQsendDescribePortal(this->pgconn, pg_cstr_enc(portal, this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "PQsendDescribePortal %s", PQerrorMessage(this->pgconn)); - - pgconn_wait_for_flush( self ); - return Qnil; + return pgconn_send_describe_close_prepared_portal( + self, portal, gvl_PQsendDescribePortal, + "PQsendDescribePortal"); } #ifdef HAVE_PQSETCHUNKEDROWSMODE @@ -2207,13 +2186,9 @@ pgconn_send_describe_portal(VALUE self, VALUE portal) static VALUE pgconn_send_close_prepared(VALUE self, VALUE stmt_name) { - t_pg_connection *this = pg_get_connection_safe( self ); - /* returns 0 on failure */ - if(gvl_PQsendClosePrepared(this->pgconn, pg_cstr_enc(stmt_name, this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "PQsendClosePrepared %s", 
PQerrorMessage(this->pgconn)); - - pgconn_wait_for_flush( self ); - return Qnil; + return pgconn_send_describe_close_prepared_portal( + self, stmt_name, gvl_PQsendClosePrepared, + "PQsendClosePrepared"); } @@ -2229,13 +2204,9 @@ pgconn_send_close_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_send_close_portal(VALUE self, VALUE portal) { - t_pg_connection *this = pg_get_connection_safe( self ); - /* returns 0 on failure */ - if(gvl_PQsendClosePortal(this->pgconn, pg_cstr_enc(portal, this->enc_idx)) == 0) - pg_raise_conn_error( rb_eUnableToSend, self, "PQsendClosePortal %s", PQerrorMessage(this->pgconn)); - - pgconn_wait_for_flush( self ); - return Qnil; + return pgconn_send_describe_close_prepared_portal( + self, portal, gvl_PQsendClosePortal, + "PQsendClosePortal"); } #endif @@ -3580,6 +3551,21 @@ pgconn_async_exec_prepared(int argc, VALUE *argv, VALUE self) return rb_pgresult; } +static VALUE +pgconn_async_describe_close_prepared_potral(VALUE self, VALUE name, VALUE +(*func)(VALUE, VALUE)) +{ + VALUE rb_pgresult = Qnil; + + pgconn_discard_results( self ); + func( self, name ); + rb_pgresult = pgconn_async_get_last_result( self ); + + if ( rb_block_given_p() ) { + return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); + } + return rb_pgresult; +} /* * call-seq: @@ -3592,16 +3578,7 @@ pgconn_async_exec_prepared(int argc, VALUE *argv, VALUE self) static VALUE pgconn_async_describe_portal(VALUE self, VALUE portal) { - VALUE rb_pgresult = Qnil; - - pgconn_discard_results( self ); - pgconn_send_describe_portal( self, portal ); - rb_pgresult = pgconn_async_get_last_result( self ); - - if ( rb_block_given_p() ) { - return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); - } - return rb_pgresult; + return pgconn_async_describe_close_prepared_potral(self, portal, pgconn_send_describe_portal); } @@ -3616,16 +3593,7 @@ pgconn_async_describe_portal(VALUE self, VALUE portal) static VALUE pgconn_async_describe_prepared(VALUE self, 
VALUE stmt_name) { - VALUE rb_pgresult = Qnil; - - pgconn_discard_results( self ); - pgconn_send_describe_prepared( self, stmt_name ); - rb_pgresult = pgconn_async_get_last_result( self ); - - if ( rb_block_given_p() ) { - return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); - } - return rb_pgresult; + return pgconn_async_describe_close_prepared_potral(self, stmt_name, pgconn_send_describe_prepared); } #ifdef HAVE_PQSETCHUNKEDROWSMODE @@ -3648,16 +3616,7 @@ pgconn_async_describe_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_async_close_prepared(VALUE self, VALUE stmt_name) { - VALUE rb_pgresult = Qnil; - - pgconn_discard_results( self ); - pgconn_send_close_prepared( self, stmt_name ); - rb_pgresult = pgconn_async_get_last_result( self ); - - if ( rb_block_given_p() ) { - return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); - } - return rb_pgresult; + return pgconn_async_describe_close_prepared_potral(self, stmt_name, pgconn_send_close_prepared); } /* @@ -3677,16 +3636,7 @@ pgconn_async_close_prepared(VALUE self, VALUE stmt_name) static VALUE pgconn_async_close_portal(VALUE self, VALUE portal) { - VALUE rb_pgresult = Qnil; - - pgconn_discard_results( self ); - pgconn_send_close_portal( self, portal ); - rb_pgresult = pgconn_async_get_last_result( self ); - - if ( rb_block_given_p() ) { - return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); - } - return rb_pgresult; + return pgconn_async_describe_close_prepared_potral(self, portal, pgconn_send_close_portal); } #endif From d583823aa9c8bdb622da59050eeff13e13460e21 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 24 Nov 2024 20:19:32 +0100 Subject: [PATCH 027/118] Improve documentation of close_prepared/portal --- ext/pg_connection.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index ddaeaa744..22b7d9af6 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -3604,8 
+3604,9 @@ pgconn_async_describe_prepared(VALUE self, VALUE stmt_name) * Submits a request to close the specified prepared statement, and waits for completion. * close_prepared allows an application to close a previously prepared statement. * Closing a statement releases all of its associated resources on the server and allows its name to be reused. + * It's the same as using the +DEALLOCATE+ SQL statement, but on a lower protocol level. * - * statement_name can be "" or +nil+ to reference the unnamed statement. + * +statement_name+ can be "" or +nil+ to reference the unnamed statement. * It is fine if no statement exists with this name, in that case the operation is a no-op. * On success, a PG::Result with status PGRES_COMMAND_OK is returned. * @@ -3629,6 +3630,10 @@ pgconn_async_close_prepared(VALUE self, VALUE stmt_name) * Closing a portal releases all of its associated resources on the server and allows its name to be reused. * (pg does not provide any direct access to portals, but you can use this function to close a cursor created with a DECLARE CURSOR SQL command.) * + * +portal_name+ can be "" or +nil+ to reference the unnamed portal. + * It is fine if no portal exists with this name, in that case the operation is a no-op. + * On success, a PG::Result with status PGRES_COMMAND_OK is returned. + * * See also corresponding {libpq function}[https://www.postgresql.org/docs/current/libpq-exec.html#LIBPQ-PQCLOSEPORTAL]. * * Available since PostgreSQL-17. From 6dbb3e65b1bc0e334c4f45edbb8dc3c3db0e1654 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 25 Nov 2024 15:19:35 +0100 Subject: [PATCH 028/118] Add Connection#send_pipeline_sync, async_pipeline_sync and release GVL at PQ(sendP|P)ipelineSync Also make pipeline_sync an alias for sync_pipeline_sync vs. async_pipeline_sync. Now send_pipeline_sync and flush is used to notify IO waiting to the scheduler. 
--- ext/gvl_wrappers.c | 4 ++++ ext/gvl_wrappers.h | 6 +++++ ext/pg_connection.c | 47 +++++++++++++++++++++++++++++++------- lib/pg/connection.rb | 38 ++++++++++++++++++++++++++++-- spec/pg/connection_spec.rb | 5 ++++ spec/pg/scheduler_spec.rb | 16 +++++++++++++ 6 files changed, 106 insertions(+), 10 deletions(-) diff --git a/ext/gvl_wrappers.c b/ext/gvl_wrappers.c index db9357077..dce0daf92 100644 --- a/ext/gvl_wrappers.c +++ b/ext/gvl_wrappers.c @@ -11,6 +11,10 @@ PGresult *PQclosePrepared(PGconn *conn, const char *stmtName){return NULL;} PGresult *PQclosePortal(PGconn *conn, const char *portalName){return NULL;} int PQsendClosePrepared(PGconn *conn, const char *stmtName){return 0;} int PQsendClosePortal(PGconn *conn, const char *portalName){return 0;} +int PQsendPipelineSync(PGconn *conn){return 0;} +#endif +#ifndef HAVE_PQENTERPIPELINEMODE +int PQpipelineSync(PGconn *conn){return 0;} #endif #ifdef ENABLE_GVL_UNLOCK diff --git a/ext/gvl_wrappers.h b/ext/gvl_wrappers.h index 0c2c5b848..97f5ab070 100644 --- a/ext/gvl_wrappers.h +++ b/ext/gvl_wrappers.h @@ -208,6 +208,10 @@ #define FOR_EACH_PARAM_OF_PQsendClosePortal(param) \ param(PGconn *, conn) +#define FOR_EACH_PARAM_OF_PQpipelineSync(param) + +#define FOR_EACH_PARAM_OF_PQsendPipelineSync(param) + #define FOR_EACH_PARAM_OF_PQsetClientEncoding(param) \ param(PGconn *, conn) @@ -252,6 +256,8 @@ function(PQsendDescribePortal, GVL_TYPE_NONVOID, int, const char *, portal) \ function(PQsendClosePrepared, GVL_TYPE_NONVOID, int, const char *, stmt) \ function(PQsendClosePortal, GVL_TYPE_NONVOID, int, const char *, portal) \ + function(PQpipelineSync, GVL_TYPE_NONVOID, int, PGconn *, conn) \ + function(PQsendPipelineSync, GVL_TYPE_NONVOID, int, PGconn *, conn) \ function(PQsetClientEncoding, GVL_TYPE_NONVOID, int, const char *, encoding) \ function(PQisBusy, GVL_TYPE_NONVOID, int, PGconn *, conn) \ function(PQencryptPasswordConn, GVL_TYPE_NONVOID, char *, const char *, algorithm) \ diff --git a/ext/pg_connection.c 
b/ext/pg_connection.c index 22b7d9af6..5a2ff6aab 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -3751,6 +3751,8 @@ pgconn_pipeline_status(VALUE self) * Raises PG::Error and has no effect if the connection is not currently idle, i.e., it has a result ready, or it is waiting for more input from the server, etc. * This function does not actually send anything to the server, it just changes the libpq connection state. * + * See the {PostgreSQL documentation}[https://www.postgresql.org/docs/17/libpq-pipeline-mode.html#LIBPQ-PIPELINE-MODE]. + * * Available since PostgreSQL-14 */ static VALUE @@ -3789,29 +3791,55 @@ pgconn_exit_pipeline_mode(VALUE self) /* * call-seq: - * conn.pipeline_sync -> nil + * conn.sync_pipeline_sync -> nil * - * Marks a synchronization point in a pipeline by sending a sync message and flushing the send buffer. - * This serves as the delimiter of an implicit transaction and an error recovery point; see Section 34.5.1.3 of the PostgreSQL documentation. + * This function has the same behavior as #async_pipeline_sync, but is implemented using the synchronous command processing API of libpq. + * See #async_exec for the differences between the two API variants. + * It's not recommended to use explicit sync or async variants but #pipeline_sync instead, unless you have a good reason to do so. * + * Available since PostgreSQL-14 + */ +static VALUE +pgconn_sync_pipeline_sync(VALUE self) +{ + PGconn *conn = pg_get_pgconn(self); + int res = gvl_PQpipelineSync(conn); + if( res != 1 ) + pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); + + return Qnil; +} + + +#ifdef HAVE_PQSETCHUNKEDROWSMODE +/* + * call-seq: + * conn.send_pipeline_sync -> nil + * + * Marks a synchronization point in a pipeline by sending a sync message without flushing the send buffer. + * + * This serves as the delimiter of an implicit transaction and an error recovery point. 
* Raises PG::Error if the connection is not in pipeline mode or sending a sync message failed. + * Note that the message is not itself flushed to the server automatically; use flush if necessary. * - * Available since PostgreSQL-14 + * Available since PostgreSQL-17 */ static VALUE -pgconn_pipeline_sync(VALUE self) +pgconn_send_pipeline_sync(VALUE self) { PGconn *conn = pg_get_pgconn(self); - int res = PQpipelineSync(conn); + int res = gvl_PQsendPipelineSync(conn); if( res != 1 ) pg_raise_conn_error( rb_ePGerror, self, "%s", PQerrorMessage(conn)); return Qnil; } +#endif + /* * call-seq: - * conn.pipeline_sync -> nil + * conn.send_flush_request -> nil * * Sends a request for the server to flush its output buffer. * @@ -4769,8 +4797,11 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "pipeline_status", pgconn_pipeline_status, 0); rb_define_method(rb_cPGconn, "enter_pipeline_mode", pgconn_enter_pipeline_mode, 0); rb_define_method(rb_cPGconn, "exit_pipeline_mode", pgconn_exit_pipeline_mode, 0); - rb_define_method(rb_cPGconn, "pipeline_sync", pgconn_pipeline_sync, 0); + rb_define_method(rb_cPGconn, "sync_pipeline_sync", pgconn_sync_pipeline_sync, 0); rb_define_method(rb_cPGconn, "send_flush_request", pgconn_send_flush_request, 0); +#ifdef HAVE_PQSETCHUNKEDROWSMODE + rb_define_method(rb_cPGconn, "send_pipeline_sync", pgconn_send_pipeline_sync, 0); +#endif #endif /****** PG::Connection INSTANCE METHODS: Large Object Support ******/ diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 04613f103..cd3281224 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -536,6 +536,25 @@ def put_copy_end(*args) end alias async_put_copy_end put_copy_end + if method_defined? :send_pipeline_sync + # call-seq: + # conn.pipeline_sync + # + # Marks a synchronization point in a pipeline by sending a sync message and flushing the send buffer. + # This serves as the delimiter of an implicit transaction and an error recovery point. 
+ # + # See enter_pipeline_mode + # + # Raises PG::Error if the connection is not in pipeline mode or sending a sync message failed. + # + # Available since PostgreSQL-14 + def pipeline_sync(*args) + send_pipeline_sync(*args) + flush + end + alias async_pipeline_sync pipeline_sync + end + if method_defined? :sync_encrypt_password # call-seq: # conn.encrypt_password( password, username, algorithm=nil ) -> String @@ -894,14 +913,29 @@ def ping(*args) private_constant :REDIRECT_CLASS_METHODS # These methods are affected by PQsetnonblocking - REDIRECT_SEND_METHODS = PG.make_shareable({ + REDIRECT_SEND_METHODS = { :isnonblocking => [:async_isnonblocking, :sync_isnonblocking], :nonblocking? => [:async_isnonblocking, :sync_isnonblocking], :put_copy_data => [:async_put_copy_data, :sync_put_copy_data], :put_copy_end => [:async_put_copy_end, :sync_put_copy_end], :flush => [:async_flush, :sync_flush], - }) + } private_constant :REDIRECT_SEND_METHODS + if PG::Connection.instance_methods.include? :sync_pipeline_sync + if PG::Connection.instance_methods.include? 
:send_pipeline_sync + # PostgreSQL-17+ + REDIRECT_SEND_METHODS.merge!({ + :pipeline_sync => [:async_pipeline_sync, :sync_pipeline_sync], + }) + else + # PostgreSQL-14+ + REDIRECT_SEND_METHODS.merge!({ + :pipeline_sync => [:sync_pipeline_sync, :sync_pipeline_sync], + }) + end + end + PG.make_shareable(REDIRECT_SEND_METHODS) + REDIRECT_METHODS = { :exec => [:async_exec, :sync_exec], :query => [:async_exec, :sync_exec], diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index bdaf597d5..f8f1571ad 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -2173,6 +2173,11 @@ def wait_check_socket(conn) @conn.pipeline_sync }.to raise_error(PG::Error){|err| expect(err).to have_attributes(connection: @conn) } end + + it "has send_pipeline_sync method", :postgresql_17 do + expect( @conn.respond_to?(:send_pipeline_sync) ).to be_truthy + expect( @conn.respond_to?(:async_pipeline_sync) ).to be_truthy + end end describe "send_flush_request" do diff --git a/spec/pg/scheduler_spec.rb b/spec/pg/scheduler_spec.rb index fac371218..d7d5c7d75 100644 --- a/spec/pg/scheduler_spec.rb +++ b/spec/pg/scheduler_spec.rb @@ -263,4 +263,20 @@ expect( ping ).to eq( PG::PQPING_OK ) end end + + it "can send a pipeline_sync message", :postgresql_14 do + run_with_scheduler(99) do |conn| + conn.enter_pipeline_mode + 1000.times do |idx| + # This doesn't fail on sync_pipeline_sync, since PQpipelineSync() tries to flush, but doesn't wait for writablility. 
+ conn.pipeline_sync + end + 1000.times do + expect( conn.get_result.result_status ).to eq( PG::PGRES_PIPELINE_SYNC ) + end + expect( conn.get_result ).to be_nil + expect( conn.get_result ).to be_nil + conn.exit_pipeline_mode + end + end end From 221af3d08e6f6f700e9a7a551f341c6838f33855 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 25 Nov 2024 15:46:27 +0100 Subject: [PATCH 029/118] CI: Change PostgreSQL versions to include 14 and 16 --- .github/workflows/binary-gems.yml | 2 +- .github/workflows/source-gem.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 48bb03d7c..5264bddb7 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -60,7 +60,7 @@ jobs: - os: windows-latest ruby: "2.7" platform: "x64-mingw32" - PGVERSION: 10.20-1-windows + PGVERSION: 16.6-1-windows-x64 runs-on: ${{ matrix.os }} env: diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index 28c001457..bc29b5e6f 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -66,7 +66,7 @@ jobs: PGVER: "10" - os: ubuntu ruby: "truffleruby" - PGVER: "13" + PGVER: "14" - os: ubuntu ruby: "truffleruby-head" PGVER: "17" From ac16ec5eb7b0d316b209429b29955a82397595b6 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 25 Nov 2024 21:37:59 +0100 Subject: [PATCH 030/118] Remove workaround for Truffleruby < 21.3.0 --- lib/pg/connection.rb | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index cd3281224..1ad1f725a 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -632,24 +632,6 @@ def cancel retry rescue EOFError end - elsif RUBY_ENGINE == 'truffleruby' - begin - cl = socket_io.remote_address.connect - rescue NotImplementedError - # Workaround for truffleruby < 21.3.0 - cl2 = Socket.for_fd(socket_io.fileno) - cl2.autoclose = false - adr = 
cl2.remote_address - if adr.ip? - cl = TCPSocket.new(adr.ip_address, adr.ip_port) - cl.autoclose = false - else - cl = UNIXSocket.new(adr.unix_path) - cl.autoclose = false - end - end - cl.write(cancel_request) - cl.read(1) else cl = socket_io.remote_address.connect # Send CANCEL_REQUEST_CODE and parameters From 8d41928ccdc611e6b1e2ce4bd666e4b9e8b68220 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 26 Nov 2024 11:55:07 +0100 Subject: [PATCH 031/118] Add support for new query cancel functions of PostgreSQL-17 This adds the new class `PG::CancelConnection` which provides the ability to cancel a query per blocking or per non-blocking functions. If the new functions are available they are used and the older no longer compiled in. This way we can get rid of reading out the internal `PGcancel` struct by `Connection#backend_key` . --- ext/gvl_wrappers.c | 3 + ext/gvl_wrappers.h | 11 + ext/pg.c | 5 + ext/pg.h | 5 + ext/pg_cancel_connection.c | 331 ++++++++++++++++++++++++++++++ ext/pg_connection.c | 15 +- lib/pg.rb | 1 + lib/pg/cancel_connection.rb | 18 ++ lib/pg/connection.rb | 209 +++++++++++-------- spec/pg/cancel_connection_spec.rb | 44 ++++ 10 files changed, 548 insertions(+), 94 deletions(-) create mode 100644 ext/pg_cancel_connection.c create mode 100644 lib/pg/cancel_connection.rb create mode 100644 spec/pg/cancel_connection_spec.rb diff --git a/ext/gvl_wrappers.c b/ext/gvl_wrappers.c index dce0daf92..e472d778c 100644 --- a/ext/gvl_wrappers.c +++ b/ext/gvl_wrappers.c @@ -12,6 +12,9 @@ PGresult *PQclosePortal(PGconn *conn, const char *portalName){return NULL;} int PQsendClosePrepared(PGconn *conn, const char *stmtName){return 0;} int PQsendClosePortal(PGconn *conn, const char *portalName){return 0;} int PQsendPipelineSync(PGconn *conn){return 0;} +int PQcancelBlocking(PGcancelConn *cancelConn){return 0;} +int PQcancelStart(PGcancelConn *cancelConn){return 0;} +PostgresPollingStatusType PQcancelPoll(PGcancelConn *cancelConn){return PGRES_POLLING_FAILED;} #endif 
#ifndef HAVE_PQENTERPIPELINEMODE int PQpipelineSync(PGconn *conn){return 0;} diff --git a/ext/gvl_wrappers.h b/ext/gvl_wrappers.h index 97f5ab070..b3526d184 100644 --- a/ext/gvl_wrappers.h +++ b/ext/gvl_wrappers.h @@ -21,6 +21,10 @@ # include RUBY_EXTCONF_H #endif +#ifndef HAVE_PQSETCHUNKEDROWSMODE +typedef struct pg_cancel_conn PGcancelConn; +#endif + #define DEFINE_PARAM_LIST1(type, name) \ name, @@ -217,6 +221,10 @@ #define FOR_EACH_PARAM_OF_PQisBusy(param) +#define FOR_EACH_PARAM_OF_PQcancelBlocking(param) +#define FOR_EACH_PARAM_OF_PQcancelStart(param) +#define FOR_EACH_PARAM_OF_PQcancelPoll(param) + #define FOR_EACH_PARAM_OF_PQencryptPasswordConn(param) \ param(PGconn *, conn) \ param(const char *, passwd) \ @@ -260,6 +268,9 @@ function(PQsendPipelineSync, GVL_TYPE_NONVOID, int, PGconn *, conn) \ function(PQsetClientEncoding, GVL_TYPE_NONVOID, int, const char *, encoding) \ function(PQisBusy, GVL_TYPE_NONVOID, int, PGconn *, conn) \ + function(PQcancelBlocking, GVL_TYPE_NONVOID, int, PGcancelConn *, conn) \ + function(PQcancelStart, GVL_TYPE_NONVOID, int, PGcancelConn *, conn) \ + function(PQcancelPoll, GVL_TYPE_NONVOID, PostgresPollingStatusType, PGcancelConn *, conn) \ function(PQencryptPasswordConn, GVL_TYPE_NONVOID, char *, const char *, algorithm) \ function(PQcancel, GVL_TYPE_NONVOID, int, int, errbufsize); diff --git a/ext/pg.c b/ext/pg.c index d60d1c666..86a88366f 100644 --- a/ext/pg.c +++ b/ext/pg.c @@ -404,6 +404,10 @@ Init_pg_ext(void) /* Checking if server is in standby mode. Available since PostgreSQL-14. */ rb_define_const(rb_mPGconstants, "CONNECTION_CHECK_STANDBY", INT2FIX(CONNECTION_CHECK_STANDBY)); #endif +#if PG_MAJORVERSION_NUM >= 17 + /* Waiting for connection attempt to be started. Available since PostgreSQL-17. 
*/ + rb_define_const(rb_mPGconstants, "CONNECTION_ALLOCATED", INT2FIX(CONNECTION_ALLOCATED)); +#endif /****** PG::Connection CLASS CONSTANTS: Nonblocking connection polling status ******/ @@ -689,4 +693,5 @@ Init_pg_ext(void) init_pg_copycoder(); init_pg_recordcoder(); init_pg_tuple(); + init_pg_cancon(); } diff --git a/ext/pg.h b/ext/pg.h index 9b208942e..df13c4cb8 100644 --- a/ext/pg.h +++ b/ext/pg.h @@ -305,6 +305,7 @@ void init_pg_text_decoder _(( void )); void init_pg_binary_encoder _(( void )); void init_pg_binary_decoder _(( void )); void init_pg_tuple _(( void )); +void init_pg_cancon _(( void )); VALUE lookup_error_class _(( const char * )); VALUE pg_bin_dec_bytea _(( t_pg_coder*, const char *, int, int, int, int )); VALUE pg_text_dec_string _(( t_pg_coder*, const char *, int, int, int, int )); @@ -344,6 +345,10 @@ void pg_typemap_compact _(( void * )); PGconn *pg_get_pgconn _(( VALUE )); t_pg_connection *pg_get_connection _(( VALUE )); VALUE pgconn_block _(( int, VALUE *, VALUE )); +#ifdef __GNUC__ +__attribute__((format(printf, 3, 4))) +#endif +NORETURN(void pg_raise_conn_error _(( VALUE klass, VALUE self, const char *format, ...))); VALUE pg_new_result _(( PGresult *, VALUE )); VALUE pg_new_result_autoclear _(( PGresult *, VALUE )); diff --git a/ext/pg_cancel_connection.c b/ext/pg_cancel_connection.c new file mode 100644 index 000000000..86b9eb9f1 --- /dev/null +++ b/ext/pg_cancel_connection.c @@ -0,0 +1,331 @@ +#include "pg.h" + +/******************************************************************** + * + * Document-class: PG::CancelConnection + * + * The class to represent a connection to cancel a query. + * An instance of this class can be created by PG::Connection#cancel . 
+ * + */ + +#ifdef HAVE_PQSETCHUNKEDROWSMODE + +static VALUE rb_cPG_Cancon; +static ID s_id_autoclose_set; + +typedef struct { + PGcancelConn *pg_cancon; + + /* Cached IO object for the socket descriptor */ + VALUE socket_io; + +#if defined(_WIN32) + /* File descriptor to be used for rb_w32_unwrap_io_handle() */ + int ruby_sd; +#endif +} t_pg_cancon; + + +/* + * GC Mark function + */ +static void +pg_cancon_gc_mark( void *_this ) +{ + t_pg_cancon *this = (t_pg_cancon *)_this; + rb_gc_mark_movable( this->socket_io ); +} + +static void +pg_cancon_gc_compact( void *_this ) +{ + t_pg_connection *this = (t_pg_connection *)_this; + pg_gc_location( this->socket_io ); +} + +static void +pg_cancon_gc_free( void *_this ) +{ + t_pg_cancon *this = (t_pg_cancon *)_this; +#if defined(_WIN32) + if ( RTEST(this->socket_io) ) { + if( rb_w32_unwrap_io_handle(this->ruby_sd) ){ + rb_warn("pg: Could not unwrap win32 socket handle by garbage collector"); + } + } +#endif + if (this->pg_cancon) + PQcancelFinish(this->pg_cancon); + xfree(this); +} + +static size_t +pg_cancon_memsize( const void *_this ) +{ + const t_pg_cancon *this = (const t_pg_cancon *)_this; + return sizeof(*this); +} + +static const rb_data_type_t pg_cancon_type = { + "PG::CancelConnection", + { + pg_cancon_gc_mark, + pg_cancon_gc_free, + pg_cancon_memsize, + pg_cancon_gc_compact, + }, + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | PG_RUBY_TYPED_FROZEN_SHAREABLE, +}; + +/* + * Document-method: allocate + * + * call-seq: + * PG::VeryTuple.allocate -> obj + */ +static VALUE +pg_cancon_s_allocate( VALUE klass ) +{ + t_pg_cancon *this; + return TypedData_Make_Struct( klass, t_pg_cancon, &pg_cancon_type, this ); +} + +static inline t_pg_cancon * +pg_cancon_get_this( VALUE self ) +{ + t_pg_cancon *this; + TypedData_Get_Struct(self, t_pg_cancon, &pg_cancon_type, this); + + return this; +} + +static inline PGcancelConn * +pg_cancon_get_conn( VALUE self ) +{ + t_pg_cancon *this = pg_cancon_get_this(self); + 
if (this->pg_cancon == NULL) + pg_raise_conn_error( rb_eConnectionBad, self, "PG::CancelConnection is closed"); + + return this->pg_cancon; +} + +/* + * Close the associated socket IO object if there is one. + */ +static void +pg_cancon_close_socket_io( VALUE self ) +{ + t_pg_cancon *this = pg_cancon_get_this( self ); + VALUE socket_io = this->socket_io; + + if ( RTEST(socket_io) ) { +#if defined(_WIN32) + if( rb_w32_unwrap_io_handle(this->ruby_sd) ) + pg_raise_conn_error( rb_eConnectionBad, self, "Could not unwrap win32 socket handle"); +#endif + rb_funcall( socket_io, rb_intern("close"), 0 ); + } + + RB_OBJ_WRITE(self, &this->socket_io, Qnil); +} + +VALUE +pg_cancon_initialize(VALUE self, VALUE rb_conn) +{ + t_pg_cancon *this = pg_cancon_get_this(self); + PGconn *conn = pg_get_pgconn(rb_conn); + + this->pg_cancon = PQcancelCreate(conn); + + return self; +} + +/* + * call-seq: + * conn.sync_cancel -> nil + * + * Requests that the server abandons processing of the current command in a blocking manner. + * + * If the cancel request wasn't successfully dispatched an error message is raised. + * + * Successful dispatch of the cancellation is no guarantee that the request will have any effect, however. + * If the cancellation is effective, the command being canceled will terminate early and raises an error. + * If the cancellation fails (say, because the server was already done processing the command), then there will be no visible result at all. 
+ * + */ +static VALUE +pg_cancon_sync_cancel(VALUE self) +{ + PGcancelConn *conn = pg_cancon_get_conn(self); + + pg_cancon_close_socket_io( self ); + if(gvl_PQcancelBlocking(conn) == 0) + pg_raise_conn_error( rb_eConnectionBad, self, "PQcancelBlocking %s", PQcancelErrorMessage(conn)); + return Qnil; +} + +/* + * call-seq: + * conn.start -> nil + * + */ +static VALUE +pg_cancon_start(VALUE self) +{ + PGcancelConn *conn = pg_cancon_get_conn(self); + + pg_cancon_close_socket_io( self ); + if(gvl_PQcancelStart(conn) == 0) + pg_raise_conn_error( rb_eConnectionBad, self, "PQcancelStart %s", PQcancelErrorMessage(conn)); + return Qnil; +} + +/* + * call-seq: + * conn.error_message -> String + * + */ +static VALUE +pg_cancon_error_message(VALUE self) +{ + PGcancelConn *conn = pg_cancon_get_conn(self); + char *p_err; + + p_err = PQcancelErrorMessage(conn); + + return p_err ? rb_str_new_cstr(p_err) : Qnil; +} + +/* + * call-seq: + * conn.poll -> nil + * + */ +static VALUE +pg_cancon_poll(VALUE self) +{ + PostgresPollingStatusType status; + PGcancelConn *conn = pg_cancon_get_conn(self); + + pg_cancon_close_socket_io( self ); + status = gvl_PQcancelPoll(conn); + + return INT2FIX((int)status); +} + +/* + * call-seq: + * conn.status -> nil + * + */ +static VALUE +pg_cancon_status(VALUE self) +{ + ConnStatusType status; + PGcancelConn *conn = pg_cancon_get_conn(self); + + status = PQcancelStatus(conn); + + return INT2NUM(status); +} + +/* + * call-seq: + * conn.socket_io() -> IO + * + * Fetch an IO object created from the CancelConnection's underlying socket. + * This object can be used per socket_io.wait_readable, socket_io.wait_writable or for IO.select to wait for events while running asynchronous API calls. + * IO#wait_*able is is Fiber.scheduler compatible in contrast to IO.select. + * + * The IO object can change while the connection is established. + * So be sure not to cache the IO object, but repeat calling conn.socket_io instead. 
+ */ +static VALUE +pg_cancon_socket_io(VALUE self) +{ + int sd; + int ruby_sd; + t_pg_cancon *this = pg_cancon_get_this( self ); + VALUE cSocket; + VALUE socket_io = this->socket_io; + + if ( !RTEST(socket_io) ) { + if( (sd = PQcancelSocket(this->pg_cancon)) < 0){ + pg_raise_conn_error( rb_eConnectionBad, self, "PQcancelSocket() can't get socket descriptor"); + } + + #ifdef _WIN32 + ruby_sd = rb_w32_wrap_io_handle((HANDLE)(intptr_t)sd, O_RDWR|O_BINARY|O_NOINHERIT); + if( ruby_sd == -1 ) + pg_raise_conn_error( rb_eConnectionBad, self, "Could not wrap win32 socket handle"); + + this->ruby_sd = ruby_sd; + #else + ruby_sd = sd; + #endif + + cSocket = rb_const_get(rb_cObject, rb_intern("BasicSocket")); + socket_io = rb_funcall( cSocket, rb_intern("for_fd"), 1, INT2NUM(ruby_sd)); + + /* Disable autoclose feature */ + rb_funcall( socket_io, s_id_autoclose_set, 1, Qfalse ); + + RB_OBJ_WRITE(self, &this->socket_io, socket_io); + } + + return socket_io; +} + +/* + * call-seq: + * conn.reset -> nil + * + */ +static VALUE +pg_cancon_reset(VALUE self) +{ + PGcancelConn *conn = pg_cancon_get_conn(self); + + pg_cancon_close_socket_io( self ); + PQcancelReset(conn); + + return Qnil; +} + +static VALUE +pg_cancon_finish(VALUE self) +{ + t_pg_cancon *this = pg_cancon_get_this( self ); + + pg_cancon_close_socket_io( self ); + if( this->pg_cancon ) + PQcancelFinish(this->pg_cancon); + this->pg_cancon = NULL; + + return Qnil; +} +#endif + +void +init_pg_cancon(void) +{ +#ifdef HAVE_PQSETCHUNKEDROWSMODE + s_id_autoclose_set = rb_intern("autoclose="); + + rb_cPG_Cancon = rb_define_class_under( rb_mPG, "CancelConnection", rb_cObject ); + rb_define_alloc_func( rb_cPG_Cancon, pg_cancon_s_allocate ); + rb_include_module(rb_cPG_Cancon, rb_mEnumerable); + + rb_define_method(rb_cPG_Cancon, "initialize", pg_cancon_initialize, 1); + rb_define_method(rb_cPG_Cancon, "sync_cancel", pg_cancon_sync_cancel, 0); + rb_define_method(rb_cPG_Cancon, "start", pg_cancon_start, 0); + 
rb_define_method(rb_cPG_Cancon, "poll", pg_cancon_poll, 0); + rb_define_method(rb_cPG_Cancon, "status", pg_cancon_status, 0); + rb_define_method(rb_cPG_Cancon, "socket_io", pg_cancon_socket_io, 0); + rb_define_method(rb_cPG_Cancon, "error_message", pg_cancon_error_message, 0); + rb_define_method(rb_cPG_Cancon, "reset", pg_cancon_reset, 0); + rb_define_method(rb_cPG_Cancon, "finish", pg_cancon_finish, 0); +#endif +} diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 5a2ff6aab..beb6da14e 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -30,11 +30,8 @@ static VALUE pgconn_async_flush(VALUE self); /* * Convenience function to raise connection errors */ -#ifdef __GNUC__ -__attribute__((format(printf, 3, 4))) -#endif -NORETURN( static void -pg_raise_conn_error( VALUE klass, VALUE self, const char *format, ...)) +void +pg_raise_conn_error( VALUE klass, VALUE self, const char *format, ...) { VALUE msg, error; va_list ap; @@ -981,6 +978,7 @@ pgconn_backend_pid(VALUE self) return INT2NUM(PQbackendPID(pg_get_pgconn(self))); } +#ifndef HAVE_PQSETCHUNKEDROWSMODE typedef struct { struct sockaddr_storage addr; @@ -1025,6 +1023,7 @@ pgconn_backend_key(VALUE self) return INT2NUM(be_key); } +#endif /* * call-seq: @@ -2299,6 +2298,7 @@ pgconn_sync_flush(VALUE self) return (ret) ? 
Qfalse : Qtrue; } +#ifndef HAVE_PQSETCHUNKEDROWSMODE static VALUE pgconn_sync_cancel(VALUE self) { @@ -2320,6 +2320,7 @@ pgconn_sync_cancel(VALUE self) PQfreeCancel(cancel); return retval; } +#endif /* @@ -4683,7 +4684,9 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "socket", pgconn_socket, 0); rb_define_method(rb_cPGconn, "socket_io", pgconn_socket_io, 0); rb_define_method(rb_cPGconn, "backend_pid", pgconn_backend_pid, 0); +#ifndef HAVE_PQSETCHUNKEDROWSMODE rb_define_method(rb_cPGconn, "backend_key", pgconn_backend_key, 0); +#endif rb_define_method(rb_cPGconn, "connection_needs_password", pgconn_connection_needs_password, 0); rb_define_method(rb_cPGconn, "connection_used_password", pgconn_connection_used_password, 0); /* rb_define_method(rb_cPGconn, "getssl", pgconn_getssl, 0); */ @@ -4753,7 +4756,9 @@ init_pg_connection(void) rb_define_method(rb_cPGconn, "discard_results", pgconn_discard_results, 0); /****** PG::Connection INSTANCE METHODS: Cancelling Queries in Progress ******/ +#ifndef HAVE_PQSETCHUNKEDROWSMODE rb_define_method(rb_cPGconn, "sync_cancel", pgconn_sync_cancel, 0); +#endif /****** PG::Connection INSTANCE METHODS: NOTIFY ******/ rb_define_method(rb_cPGconn, "notifies", pgconn_notifies, 0); diff --git a/lib/pg.rb b/lib/pg.rb index b6217487f..45b58d0ee 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -111,6 +111,7 @@ module TextEncoder require 'pg/coder' require 'pg/type_map_by_column' require 'pg/connection' + require 'pg/cancel_connection' require 'pg/result' require 'pg/tuple' autoload :VERSION, 'pg/version' diff --git a/lib/pg/cancel_connection.rb b/lib/pg/cancel_connection.rb new file mode 100644 index 000000000..82d46beeb --- /dev/null +++ b/lib/pg/cancel_connection.rb @@ -0,0 +1,18 @@ +# -*- ruby -*- +# frozen_string_literal: true + +require 'pg' unless defined?( PG ) + +if defined?(PG::CancelConnection) + class PG::CancelConnection + include PG::Connection::Pollable + + # The timeout used by async_cancel to establish the cancel connection. 
+ attr_accessor :async_connect_timeout + + def async_cancel + start + polling_loop(:poll, async_connect_timeout) + end + end +end diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 1ad1f725a..685b3dd98 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -602,110 +602,141 @@ def reset end alias async_reset reset - # call-seq: - # conn.cancel() -> String - # - # Requests cancellation of the command currently being - # processed. - # - # Returns +nil+ on success, or a string containing the - # error message if a failure occurs. - def cancel - be_pid = backend_pid - be_key = backend_key - cancel_request = [0x10, 1234, 5678, be_pid, be_key].pack("NnnNN") - - if Fiber.respond_to?(:scheduler) && Fiber.scheduler && RUBY_PLATFORM =~ /mingw|mswin/ - # Ruby's nonblocking IO is not really supported on Windows. - # We work around by using threads and explicit calls to wait_readable/wait_writable. - cl = Thread.new(socket_io.remote_address) { |ra| ra.connect }.value - begin - cl.write_nonblock(cancel_request) - rescue IO::WaitReadable, Errno::EINTR - cl.wait_writable - retry - end - begin - cl.read_nonblock(1) - rescue IO::WaitReadable, Errno::EINTR - cl.wait_readable - retry - rescue EOFError - end - else - cl = socket_io.remote_address.connect - # Send CANCEL_REQUEST_CODE and parameters - cl.write(cancel_request) - # Wait for the postmaster to close the connection, which indicates that it's processed the request. - cl.read(1) + if defined?(PG::CancelConnection) + # PostgreSQL-17+ + + def sync_cancel + cancon = PG::CancelConnection.new(self) + cancon.sync_cancel + rescue PG::Error => err + err.to_s end - cl.close - nil - rescue SystemCallError => err - err.to_s + # call-seq: + # conn.cancel() -> String + # + # Requests cancellation of the command currently being + # processed. + # + # Returns +nil+ on success, or a string containing the + # error message if a failure occurs. 
+ def cancel + cancon = PG::CancelConnection.new(self) + cancon.async_connect_timeout = conninfo_hash[:connect_timeout] + cancon.async_cancel + rescue PG::Error => err + err.to_s + end + + else + + # PostgreSQL < 17 + + def cancel + be_pid = backend_pid + be_key = backend_key + cancel_request = [0x10, 1234, 5678, be_pid, be_key].pack("NnnNN") + + if Fiber.respond_to?(:scheduler) && Fiber.scheduler && RUBY_PLATFORM =~ /mingw|mswin/ + # Ruby's nonblocking IO is not really supported on Windows. + # We work around by using threads and explicit calls to wait_readable/wait_writable. + cl = Thread.new(socket_io.remote_address) { |ra| ra.connect }.value + begin + cl.write_nonblock(cancel_request) + rescue IO::WaitReadable, Errno::EINTR + cl.wait_writable + retry + end + begin + cl.read_nonblock(1) + rescue IO::WaitReadable, Errno::EINTR + cl.wait_readable + retry + rescue EOFError + end + else + cl = socket_io.remote_address.connect + # Send CANCEL_REQUEST_CODE and parameters + cl.write(cancel_request) + # Wait for the postmaster to close the connection, which indicates that it's processed the request. 
+ cl.read(1) + end + + cl.close + nil + rescue SystemCallError => err + err.to_s + end end alias async_cancel cancel - private def async_connect_or_reset(poll_meth) + module Pollable # Track the progress of the connection, waiting for the socket to become readable/writable before polling it + private def polling_loop(poll_meth, connect_timeout) + if (timeo = connect_timeout.to_i) && timeo > 0 + host_count = conninfo_hash[:host].to_s.count(",") + 1 + stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end - if (timeo = conninfo_hash[:connect_timeout].to_i) && timeo > 0 - host_count = conninfo_hash[:host].to_s.count(",") + 1 - stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) - end - - poll_status = PG::PGRES_POLLING_WRITING - until poll_status == PG::PGRES_POLLING_OK || - poll_status == PG::PGRES_POLLING_FAILED - - # Set single timeout to parameter "connect_timeout" but - # don't exceed total connection time of number-of-hosts * connect_timeout. - timeout = [timeo, stop_time - Process.clock_gettime(Process::CLOCK_MONOTONIC)].min if stop_time - event = if !timeout || timeout >= 0 - # If the socket needs to read, wait 'til it becomes readable to poll again - case poll_status - when PG::PGRES_POLLING_READING - if defined?(IO::READABLE) # ruby-3.0+ - socket_io.wait(IO::READABLE | IO::PRIORITY, timeout) - else - IO.select([socket_io], nil, [socket_io], timeout) + poll_status = PG::PGRES_POLLING_WRITING + until poll_status == PG::PGRES_POLLING_OK || + poll_status == PG::PGRES_POLLING_FAILED + + # Set single timeout to parameter "connect_timeout" but + # don't exceed total connection time of number-of-hosts * connect_timeout. 
+ timeout = [timeo, stop_time - Process.clock_gettime(Process::CLOCK_MONOTONIC)].min if stop_time + event = if !timeout || timeout >= 0 + # If the socket needs to read, wait 'til it becomes readable to poll again + case poll_status + when PG::PGRES_POLLING_READING + if defined?(IO::READABLE) # ruby-3.0+ + socket_io.wait(IO::READABLE | IO::PRIORITY, timeout) + else + IO.select([socket_io], nil, [socket_io], timeout) + end + + # ...and the same for when the socket needs to write + when PG::PGRES_POLLING_WRITING + if defined?(IO::WRITABLE) # ruby-3.0+ + # Use wait instead of wait_readable, since connection errors are delivered as + # exceptional/priority events on Windows. + socket_io.wait(IO::WRITABLE | IO::PRIORITY, timeout) + else + # io#wait on ruby-2.x doesn't wait for priority, so fallback to IO.select + IO.select(nil, [socket_io], [socket_io], timeout) + end end - - # ...and the same for when the socket needs to write - when PG::PGRES_POLLING_WRITING - if defined?(IO::WRITABLE) # ruby-3.0+ - # Use wait instead of wait_readable, since connection errors are delivered as - # exceptional/priority events on Windows. 
- socket_io.wait(IO::WRITABLE | IO::PRIORITY, timeout) + end + # connection to server at "localhost" (127.0.0.1), port 5433 failed: timeout expired (PG::ConnectionBad) + # connection to server on socket "/var/run/postgresql/.s.PGSQL.5433" failed: No such file or directory + unless event + if self.class.send(:host_is_named_pipe?, host) + connhost = "on socket \"#{host}\"" + elsif respond_to?(:hostaddr) + connhost = "at \"#{host}\" (#{hostaddr}), port #{port}" else - # io#wait on ruby-2.x doesn't wait for priority, so fallback to IO.select - IO.select(nil, [socket_io], [socket_io], timeout) + connhost = "at \"#{host}\", port #{port}" end + raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self) end - end - # connection to server at "localhost" (127.0.0.1), port 5433 failed: timeout expired (PG::ConnectionBad) - # connection to server on socket "/var/run/postgresql/.s.PGSQL.5433" failed: No such file or directory - unless event - if self.class.send(:host_is_named_pipe?, host) - connhost = "on socket \"#{host}\"" - elsif respond_to?(:hostaddr) - connhost = "at \"#{host}\" (#{hostaddr}), port #{port}" - else - connhost = "at \"#{host}\", port #{port}" - end - raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self) + + # Check to see if it's finished or failed yet + poll_status = send( poll_meth ) end - # Check to see if it's finished or failed yet - poll_status = send( poll_meth ) + unless status == PG::CONNECTION_OK + msg = error_message + finish + raise PG::ConnectionBad.new(msg, connection: self) + end end + end - unless status == PG::CONNECTION_OK - msg = error_message - finish - raise PG::ConnectionBad.new(msg, connection: self) - end + include Pollable + + private def async_connect_or_reset(poll_meth) + # Track the progress of the connection, waiting for the socket to become readable/writable before polling it + polling_loop(poll_meth, 
conninfo_hash[:connect_timeout]) # Set connection to nonblocking to handle all blocking states in ruby. # That way a fiber scheduler is able to handle IO requests. diff --git a/spec/pg/cancel_connection_spec.rb b/spec/pg/cancel_connection_spec.rb new file mode 100644 index 000000000..8deee4d78 --- /dev/null +++ b/spec/pg/cancel_connection_spec.rb @@ -0,0 +1,44 @@ +# -*- rspec -*- +# encoding: utf-8 + +require_relative '../helpers' +require 'pg' + +if PG.library_version < 170000 + + context "query cancelation" do + it "shouldn't define PG::CancelConnection" do + expect( !defined?(PG::CancelConnection) ) + end + end + +else + describe PG::CancelConnection do + let!(:conn) { PG::CancelConnection.new(@conn) } + + describe ".new" do + it "needs a PG::Connection" do + expect { PG::CancelConnection.new }.to raise_error( ArgumentError ) + expect { PG::CancelConnection.new 123 }.to raise_error( TypeError ) + end + end + + it "has #status" do + expect( conn.status ).to eq( PG::CONNECTION_ALLOCATED ) + end + + it "can reset" do + conn.reset + conn.reset + expect( conn.status ).to eq( PG::CONNECTION_ALLOCATED ) + end + + it "can be finished" do + conn.finish + conn.finish + expect{ conn.status }.to raise_error( PG::ConnectionBad, /closed/ ) do |err| + expect(err).to have_attributes(connection: conn) + end + end + end +end From 4b709f5602e0d37db2a9e830ba0a254ffd127120 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 26 Nov 2024 15:18:27 +0100 Subject: [PATCH 032/118] Remove duplicated code for (un-)wrapping of socket IO used in PG::Connection and PG::CancelConnection --- ext/pg.h | 5 ++- ext/pg_cancel_connection.c | 42 +++---------------- ext/pg_connection.c | 83 ++++++++++++++++++++++---------------- 3 files changed, 56 insertions(+), 74 deletions(-) diff --git a/ext/pg.h b/ext/pg.h index df13c4cb8..93ef0e466 100644 --- a/ext/pg.h +++ b/ext/pg.h @@ -116,10 +116,8 @@ typedef struct { /* enable automatic flushing of send data at the end of send_query calls */ unsigned int 
flush_data : 1; -#if defined(_WIN32) /* File descriptor to be used for rb_w32_unwrap_io_handle() */ int ruby_sd; -#endif } t_pg_connection; typedef struct pg_coder t_pg_coder; @@ -349,6 +347,9 @@ VALUE pgconn_block _(( int, VALUE *, VALUE ) __attribute__((format(printf, 3, 4))) #endif NORETURN(void pg_raise_conn_error _(( VALUE klass, VALUE self, const char *format, ...))); +VALUE pg_wrap_socket_io _(( int sd, VALUE self, VALUE *p_socket_io, int *p_ruby_sd )); +void pg_unwrap_socket_io _(( VALUE self, VALUE *p_socket_io, int ruby_sd )); + VALUE pg_new_result _(( PGresult *, VALUE )); VALUE pg_new_result_autoclear _(( PGresult *, VALUE )); diff --git a/ext/pg_cancel_connection.c b/ext/pg_cancel_connection.c index 86b9eb9f1..c21277db3 100644 --- a/ext/pg_cancel_connection.c +++ b/ext/pg_cancel_connection.c @@ -20,10 +20,8 @@ typedef struct { /* Cached IO object for the socket descriptor */ VALUE socket_io; -#if defined(_WIN32) /* File descriptor to be used for rb_w32_unwrap_io_handle() */ int ruby_sd; -#endif } t_pg_cancon; @@ -118,17 +116,7 @@ static void pg_cancon_close_socket_io( VALUE self ) { t_pg_cancon *this = pg_cancon_get_this( self ); - VALUE socket_io = this->socket_io; - - if ( RTEST(socket_io) ) { -#if defined(_WIN32) - if( rb_w32_unwrap_io_handle(this->ruby_sd) ) - pg_raise_conn_error( rb_eConnectionBad, self, "Could not unwrap win32 socket handle"); -#endif - rb_funcall( socket_io, rb_intern("close"), 0 ); - } - - RB_OBJ_WRITE(self, &this->socket_io, Qnil); + pg_unwrap_socket_io( self, &this->socket_io, this->ruby_sd); } VALUE @@ -245,37 +233,17 @@ pg_cancon_status(VALUE self) static VALUE pg_cancon_socket_io(VALUE self) { - int sd; - int ruby_sd; t_pg_cancon *this = pg_cancon_get_this( self ); - VALUE cSocket; - VALUE socket_io = this->socket_io; - if ( !RTEST(socket_io) ) { + if ( !RTEST(this->socket_io) ) { + int sd; if( (sd = PQcancelSocket(this->pg_cancon)) < 0){ pg_raise_conn_error( rb_eConnectionBad, self, "PQcancelSocket() can't get socket 
descriptor"); } - - #ifdef _WIN32 - ruby_sd = rb_w32_wrap_io_handle((HANDLE)(intptr_t)sd, O_RDWR|O_BINARY|O_NOINHERIT); - if( ruby_sd == -1 ) - pg_raise_conn_error( rb_eConnectionBad, self, "Could not wrap win32 socket handle"); - - this->ruby_sd = ruby_sd; - #else - ruby_sd = sd; - #endif - - cSocket = rb_const_get(rb_cObject, rb_intern("BasicSocket")); - socket_io = rb_funcall( cSocket, rb_intern("for_fd"), 1, INT2NUM(ruby_sd)); - - /* Disable autoclose feature */ - rb_funcall( socket_io, s_id_autoclose_set, 1, Qfalse ); - - RB_OBJ_WRITE(self, &this->socket_io, socket_io); + return pg_wrap_socket_io( sd, self, &this->socket_io, &this->ruby_sd); } - return socket_io; + return this->socket_io; } /* diff --git a/ext/pg_connection.c b/ext/pg_connection.c index beb6da14e..6743b5879 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -93,6 +93,20 @@ pg_get_pgconn( VALUE self ) } +void +pg_unwrap_socket_io( VALUE self, VALUE *p_socket_io, int ruby_sd ) +{ + if ( RTEST(*p_socket_io) ) { +#if defined(_WIN32) + if( rb_w32_unwrap_io_handle(ruby_sd) ) + pg_raise_conn_error( rb_eConnectionBad, self, "Could not unwrap win32 socket handle"); +#endif + rb_funcall( *p_socket_io, rb_intern("close"), 0 ); + } + + RB_OBJ_WRITE(self, p_socket_io, Qnil); +} + /* * Close the associated socket IO object if there is one. 
@@ -101,17 +115,7 @@ static void pgconn_close_socket_io( VALUE self ) { t_pg_connection *this = pg_get_connection( self ); - VALUE socket_io = this->socket_io; - - if ( RTEST(socket_io) ) { -#if defined(_WIN32) - if( rb_w32_unwrap_io_handle(this->ruby_sd) ) - pg_raise_conn_error( rb_eConnectionBad, self, "Could not unwrap win32 socket handle"); -#endif - rb_funcall( socket_io, rb_intern("close"), 0 ); - } - - RB_OBJ_WRITE(self, &this->socket_io, Qnil); + pg_unwrap_socket_io( self, &this->socket_io, this->ruby_sd); } @@ -914,6 +918,35 @@ pgconn_socket(VALUE self) return INT2NUM(sd); } + +VALUE +pg_wrap_socket_io(int sd, VALUE self, VALUE *p_socket_io, int *p_ruby_sd) +{ + int ruby_sd; + VALUE cSocket; + VALUE socket_io = *p_socket_io; + + #ifdef _WIN32 + ruby_sd = rb_w32_wrap_io_handle((HANDLE)(intptr_t)sd, O_RDWR|O_BINARY|O_NOINHERIT); + if( ruby_sd == -1 ) + pg_raise_conn_error( rb_eConnectionBad, self, "Could not wrap win32 socket handle"); + + *p_ruby_sd = ruby_sd; + #else + *p_ruby_sd = ruby_sd = sd; + #endif + + cSocket = rb_const_get(rb_cObject, rb_intern("BasicSocket")); + socket_io = rb_funcall( cSocket, rb_intern("for_fd"), 1, INT2NUM(ruby_sd)); + + /* Disable autoclose feature */ + rb_funcall( socket_io, s_id_autoclose_set, 1, Qfalse ); + + RB_OBJ_WRITE(self, p_socket_io, socket_io); + + return socket_io; +} + /* * call-seq: * conn.socket_io() -> IO @@ -931,37 +964,17 @@ pgconn_socket(VALUE self) static VALUE pgconn_socket_io(VALUE self) { - int sd; - int ruby_sd; t_pg_connection *this = pg_get_connection_safe( self ); - VALUE cSocket; - VALUE socket_io = this->socket_io; - if ( !RTEST(socket_io) ) { + if ( !RTEST(this->socket_io) ) { + int sd; if( (sd = PQsocket(this->pgconn)) < 0){ pg_raise_conn_error( rb_eConnectionBad, self, "PQsocket() can't get socket descriptor"); } - - #ifdef _WIN32 - ruby_sd = rb_w32_wrap_io_handle((HANDLE)(intptr_t)sd, O_RDWR|O_BINARY|O_NOINHERIT); - if( ruby_sd == -1 ) - pg_raise_conn_error( rb_eConnectionBad, self, "Could not 
wrap win32 socket handle"); - - this->ruby_sd = ruby_sd; - #else - ruby_sd = sd; - #endif - - cSocket = rb_const_get(rb_cObject, rb_intern("BasicSocket")); - socket_io = rb_funcall( cSocket, rb_intern("for_fd"), 1, INT2NUM(ruby_sd)); - - /* Disable autoclose feature */ - rb_funcall( socket_io, s_id_autoclose_set, 1, Qfalse ); - - RB_OBJ_WRITE(self, &this->socket_io, socket_io); + return pg_wrap_socket_io( sd, self, &this->socket_io, &this->ruby_sd); } - return socket_io; + return this->socket_io; } /* From d7bca82c6b40e0ef8005dde6e554dfa563508783 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 26 Nov 2024 16:03:49 +0100 Subject: [PATCH 033/118] Check result of PQcancelCreate() --- ext/pg_cancel_connection.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ext/pg_cancel_connection.c b/ext/pg_cancel_connection.c index c21277db3..8110a603d 100644 --- a/ext/pg_cancel_connection.c +++ b/ext/pg_cancel_connection.c @@ -126,6 +126,8 @@ pg_cancon_initialize(VALUE self, VALUE rb_conn) PGconn *conn = pg_get_pgconn(rb_conn); this->pg_cancon = PQcancelCreate(conn); + if (this->pg_cancon == NULL) + pg_raise_conn_error( rb_eConnectionBad, self, "PQcancelCreate failed"); return self; } From be9262cbaf6ae34142eb12a37fd0dc93976d9236 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 26 Nov 2024 16:04:45 +0100 Subject: [PATCH 034/118] Check CancelConnection#status can raise an error And GC.compact works --- spec/pg/cancel_connection_spec.rb | 4 ++++ spec/pg/gc_compact_spec.rb | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/spec/pg/cancel_connection_spec.rb b/spec/pg/cancel_connection_spec.rb index 8deee4d78..cc1f4097e 100644 --- a/spec/pg/cancel_connection_spec.rb +++ b/spec/pg/cancel_connection_spec.rb @@ -23,6 +23,10 @@ end end + it "fails to return a socket before connecting started" do + expect{ conn.socket_io }.to raise_error( PG::ConnectionBad, /PQcancelSocket/ ) + end + it "has #status" do expect( conn.status ).to eq( PG::CONNECTION_ALLOCATED ) end 
diff --git a/spec/pg/gc_compact_spec.rb b/spec/pg/gc_compact_spec.rb index 198cb7495..97b2d55f9 100644 --- a/spec/pg/gc_compact_spec.rb +++ b/spec/pg/gc_compact_spec.rb @@ -51,6 +51,12 @@ def conv_array(value) CPYENC = PG::TextEncoder::CopyRow.new type_map: TM3 RECENC = PG::TextEncoder::Record.new type_map: TM3 + if defined?(PG::CancelConnection) + CANCON = PG::CancelConnection.new(CONN2) + CANCON.start + CANCON.socket_io + end + begin # Use GC.verify_compaction_references instead of GC.compact . # This has the advantage that all movable objects are actually moved. @@ -101,6 +107,10 @@ def conv_array(value) expect( RECENC.encode([34]) ).to eq( '("34")' ) end + it "should compact PG::CancelConnection", :postgresql_17 do + expect( CANCON.socket_io ).to be_kind_of( IO ) + end + after :all do CONN2.close end From 67733e48524c3520aa459ba3ec99f60d03f3776b Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 26 Nov 2024 16:06:02 +0100 Subject: [PATCH 035/118] Add documentation to PG::CancelConnection --- ext/pg_cancel_connection.c | 85 +++++++++++++++++++++++++++++++------ ext/pg_connection.c | 4 +- ext/pg_tuple.c | 2 +- lib/pg/cancel_connection.rb | 16 ++++++- lib/pg/connection.rb | 3 ++ 5 files changed, 92 insertions(+), 18 deletions(-) diff --git a/ext/pg_cancel_connection.c b/ext/pg_cancel_connection.c index 8110a603d..cd479a805 100644 --- a/ext/pg_cancel_connection.c +++ b/ext/pg_cancel_connection.c @@ -5,7 +5,11 @@ * Document-class: PG::CancelConnection * * The class to represent a connection to cancel a query. - * An instance of this class can be created by PG::Connection#cancel . + * + * On PostgreSQL-17+ client libaray this class is used to implement PG::Connection#cancel . + * It works on older PostgreSQL server versions too. 
+ * + * Available since PostgreSQL-17 * */ @@ -25,9 +29,6 @@ typedef struct { } t_pg_cancon; -/* - * GC Mark function - */ static void pg_cancon_gc_mark( void *_this ) { @@ -81,7 +82,7 @@ static const rb_data_type_t pg_cancon_type = { * Document-method: allocate * * call-seq: - * PG::VeryTuple.allocate -> obj + * PG::CancelConnection.allocate -> obj */ static VALUE pg_cancon_s_allocate( VALUE klass ) @@ -119,6 +120,22 @@ pg_cancon_close_socket_io( VALUE self ) pg_unwrap_socket_io( self, &this->socket_io, this->ruby_sd); } +/* + * call-seq: + * PG::CancelConnection.new(conn) -> obj + * + * Prepares a connection over which a cancel request can be sent. + * + * Creates a PG::CancelConnection from a PG::Connection object, but it won't instantly start sending a cancel request over this connection. + * A cancel request can be sent over this connection in a blocking manner using #cancel and in a non-blocking manner using #start. + * #status can be used to check if the PG::CancelConnection object was connected successfully. + * This PG::CancelConnection object can be used to cancel the query that's running on the original connection in a thread-safe way. + * + * Many connection parameters of the original client will be reused when setting up the connection for the cancel request. + * Importantly, if the original connection requires encryption of the connection and/or verification of the target host (using sslmode or gssencmode), then the connection for the cancel request is made with these same requirements. + * Any connection options that are only used during authentication or after authentication of the client are ignored though, because cancellation requests do not require authentication and the connection is closed right after the cancellation request is submitted. 
+ * + */ VALUE pg_cancon_initialize(VALUE self, VALUE rb_conn) { @@ -138,11 +155,8 @@ pg_cancon_initialize(VALUE self, VALUE rb_conn) * * Requests that the server abandons processing of the current command in a blocking manner. * - * If the cancel request wasn't successfully dispatched an error message is raised. - * - * Successful dispatch of the cancellation is no guarantee that the request will have any effect, however. - * If the cancellation is effective, the command being canceled will terminate early and raises an error. - * If the cancellation fails (say, because the server was already done processing the command), then there will be no visible result at all. + * This method directly calls +PQcancelBlocking+ of libpq, so that it doesn't respond to ruby interrupts and doesn't trigger the +Thread.scheduler+ . + * It is threrfore recommended to call #cancel instead. * */ static VALUE @@ -160,6 +174,12 @@ pg_cancon_sync_cancel(VALUE self) * call-seq: * conn.start -> nil * + * Requests that the server abandons processing of the current command in a non-blocking manner. + * + * The behavior is the same like PG::Connection.connect_start . + * + * Use #poll to poll the status of the connection. + * */ static VALUE pg_cancon_start(VALUE self) @@ -176,6 +196,10 @@ pg_cancon_start(VALUE self) * call-seq: * conn.error_message -> String * + * Returns the error message most recently generated by an operation on the cancel connection. + * + * Nearly all PG::CancelConnection functions will set a message if they fail. + * Note that by libpq convention, a nonempty error_message result can consist of multiple lines, and will include a trailing newline. */ static VALUE pg_cancon_error_message(VALUE self) @@ -190,7 +214,13 @@ pg_cancon_error_message(VALUE self) /* * call-seq: - * conn.poll -> nil + * conn.poll -> Integer + * + * This is to poll libpq so that it can proceed with the cancel connection sequence. + * + * The behavior is the same like PG::Connection#connect_poll . 
+ * + * See also corresponding {libpq function}[https://www.postgresql.org/docs/current/libpq-cancel.html#LIBPQ-PQCANCELSTART] * */ static VALUE @@ -207,7 +237,23 @@ pg_cancon_poll(VALUE self) /* * call-seq: - * conn.status -> nil + * conn.status -> Integer + * + * Returns the status of the cancel connection. + * + * The status can be one of a number of values. + * However, only three of these are seen outside of an asynchronous cancel procedure: + * +CONNECTION_ALLOCATED+, +CONNECTION_OK+ and +CONNECTION_BAD+. + * The initial state of a PG::CancelConnection that's successfully created is +CONNECTION_ALLOCATED+. + * A cancel request that was successfully dispatched has the status +CONNECTION_OK+. + * A failed cancel attempt is signaled by status +CONNECTION_BAD+. + * An OK status will remain so until #finish or #reset is called. + * + * See #poll with regards to other status codes that might be returned. + * + * Successful dispatch of the cancellation is no guarantee that the request will have any effect, however. + * If the cancellation is effective, the command being canceled will terminate early and return an error result. + * If the cancellation fails (say, because the server was already done processing the command), then there will be no visible result at all. * */ static VALUE @@ -227,7 +273,7 @@ pg_cancon_status(VALUE self) * * Fetch an IO object created from the CancelConnection's underlying socket. * This object can be used per socket_io.wait_readable, socket_io.wait_writable or for IO.select to wait for events while running asynchronous API calls. - * IO#wait_*able is is Fiber.scheduler compatible in contrast to IO.select. + * IO#wait_*able is Fiber.scheduler compatible in contrast to IO.select. * * The IO object can change while the connection is established. * So be sure not to cache the IO object, but repeat calling conn.socket_io instead. 
@@ -252,6 +298,12 @@ pg_cancon_socket_io(VALUE self) * call-seq: * conn.reset -> nil * + * Resets the PG::CancelConnection so it can be reused for a new cancel connection. + * + * If the PG::CancelConnection is currently used to send a cancel request, then this connection is closed. + * It will then prepare the PG::CancelConnection object such that it can be used to send a new cancel request. + * + * This can be used to create one PG::CancelConnection for a PG::Connection and reuse it multiple times throughout the lifetime of the original PG::Connection. */ static VALUE pg_cancon_reset(VALUE self) @@ -264,6 +316,13 @@ pg_cancon_reset(VALUE self) return Qnil; } +/* + * call-seq: + * conn.finish -> nil + * + * Closes the cancel connection (if it did not finish sending the cancel request yet). Also frees memory used by the PG::CancelConnection object. + * + */ static VALUE pg_cancon_finish(VALUE self) { diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 6743b5879..6bca4de19 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -608,7 +608,7 @@ pgconn_reset_start(VALUE self) * conn.reset_poll -> Integer * * Checks the status of a connection reset operation. - * See #connect_start and #connect_poll for + * See Connection.connect_start and #connect_poll for * usage information and return values. */ static VALUE @@ -953,7 +953,7 @@ pg_wrap_socket_io(int sd, VALUE self, VALUE *p_socket_io, int *p_ruby_sd) * * Fetch an IO object created from the Connection's underlying socket. * This object can be used per socket_io.wait_readable, socket_io.wait_writable or for IO.select to wait for events while running asynchronous API calls. - * IO#wait_*able is is Fiber.scheduler compatible in contrast to IO.select. + * IO#wait_*able is Fiber.scheduler compatible in contrast to IO.select. * * The IO object can change while the connection is established, but is memorized afterwards. * So be sure not to cache the IO object, but repeat calling conn.socket_io instead. 
diff --git a/ext/pg_tuple.c b/ext/pg_tuple.c index 6c06ad70c..dae13445f 100644 --- a/ext/pg_tuple.c +++ b/ext/pg_tuple.c @@ -135,7 +135,7 @@ static const rb_data_type_t pg_tuple_type = { * Document-method: allocate * * call-seq: - * PG::VeryTuple.allocate -> obj + * PG::Tuple.allocate -> obj */ static VALUE pg_tuple_s_allocate( VALUE klass ) diff --git a/lib/pg/cancel_connection.rb b/lib/pg/cancel_connection.rb index 82d46beeb..9d4a2d781 100644 --- a/lib/pg/cancel_connection.rb +++ b/lib/pg/cancel_connection.rb @@ -7,12 +7,24 @@ class PG::CancelConnection include PG::Connection::Pollable - # The timeout used by async_cancel to establish the cancel connection. + # The timeout used by #cancel and async_cancel to establish the cancel connection. attr_accessor :async_connect_timeout - def async_cancel + # call-seq: + # conn.cancel + # + # Requests that the server abandons processing of the current command in a blocking manner. + # + # If the cancel request wasn't successfully dispatched an error message is raised. + # + # Successful dispatch of the cancellation is no guarantee that the request will have any effect, however. + # If the cancellation is effective, the command being canceled will terminate early and raises an error. + # If the cancellation fails (say, because the server was already done processing the command), then there will be no visible result at all. + # + def cancel start polling_loop(:poll, async_connect_timeout) end + alias async_cancel cancel end end diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 685b3dd98..a9d901801 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -620,6 +620,9 @@ def sync_cancel # # Returns +nil+ on success, or a string containing the # error message if a failure occurs. + # + # On PostgreSQL-17+ client libaray the class PG::CancelConnection is used. + # On older client library a pure ruby implementation is used. 
def cancel cancon = PG::CancelConnection.new(self) cancon.async_connect_timeout = conninfo_hash[:connect_timeout] From 7ac7c3730788ca2c142c1aad112e45021e7880ef Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 28 Jul 2024 13:50:28 +0200 Subject: [PATCH 036/118] Add fat binary gem for x86_64-linux - Rename rake task 'gem:windows' to 'gem:native' - Add `rake gem:native:x86_64-linux` - Replace own PostgreSQL and OpenSSL build tasks by MiniPortile This is a more standard way and allows easier extensions. - Add krb5 library for Linux target to support GSSAPI/Kerberos - Change loading of pg_ext Try lib/pg_ext in addition to lib/3.2/pg_ext to support `rake spec` in the build directory - Fat binary linux gem: Try different UnixSocket paths of different distros. - CI: Adjust binary tests for new cross build target - Change patch directory to ports/patches///*.patch - ostruct gem is no longer necessary - Fix remaining "windows" references reg. fat binary gems --- .github/workflows/binary-gems.yml | 27 ++- Gemfile | 4 +- README-Windows.rdoc | 2 +- Rakefile | 67 +++++-- Rakefile.cross | 291 ------------------------------ ext/extconf.rb | 112 +++++++++++- lib/pg.rb | 22 ++- lib/pg/connection.rb | 8 + 8 files changed, 212 insertions(+), 321 deletions(-) delete mode 100644 Rakefile.cross diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 5264bddb7..69c75bc81 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -18,6 +18,7 @@ jobs: - platform: "x64-mingw-ucrt" - platform: "x64-mingw32" - platform: "x86-mingw32" + - platform: "x86_64-linux-gnu" steps: - uses: actions/checkout@v4 - name: Set up Ruby @@ -34,7 +35,7 @@ jobs: cp gem-public_cert.pem ~/.gem/gem-public_cert.pem - name: Build binary gem - run: bundle exec rake gem:windows:${{ matrix.platform }} + run: bundle exec rake gem:native:${{ matrix.platform }} - name: Upload binary gem uses: actions/upload-artifact@v4 @@ -52,7 +53,6 @@ jobs: - os: 
windows-latest ruby: "3.3" platform: "x64-mingw-ucrt" - PGVERSION: 17.0-1-windows-x64 - os: windows-latest ruby: "3.1.4-1" platform: "x86-mingw32" @@ -61,6 +61,9 @@ jobs: ruby: "2.7" platform: "x64-mingw32" PGVERSION: 16.6-1-windows-x64 + - os: ubuntu-latest + ruby: "3.2" + platform: "x86_64-linux-gnu" runs-on: ${{ matrix.os }} env: @@ -69,9 +72,12 @@ jobs: - uses: actions/checkout@v4 - name: Set up Ruby if: matrix.platform != 'x86-mingw32' - uses: ruby/setup-ruby@v1 + uses: ruby/setup-ruby-pkgs@v1 with: ruby-version: ${{ matrix.ruby }} + apt-get: "postgresql" # Ubuntu + brew: "postgresql" # macOS + mingw: "postgresql" # Windows mingw / mswin /ucrt - name: Set up 32 bit x86 Ruby if: matrix.platform == 'x86-mingw32' @@ -89,17 +95,21 @@ jobs: with: name: binary-gem-${{ matrix.platform }} - - name: Download PostgreSQL + - name: Download PostgreSQL 32-bit + if: ${{ matrix.os == 'windows-latest' && matrix.PGVERSION }} run: | Add-Type -AssemblyName System.IO.Compression.FileSystem function Unzip { param([string]$zipfile, [string]$outpath) [System.IO.Compression.ZipFile]::ExtractToDirectory($zipfile, $outpath) } - $(new-object net.webclient).DownloadFile("http://get.enterprisedb.com/postgresql/postgresql-$env:PGVERSION-binaries.zip", "postgresql-binaries.zip") Unzip "postgresql-binaries.zip" "." 
echo "$pwd/pgsql/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + - name: set PGUSER + if: ${{ matrix.os == 'windows-latest' }} + run: | echo "PGUSER=$env:USERNAME" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append echo "PGPASSWORD=" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append @@ -108,10 +118,17 @@ jobs: - run: bundle install - run: gem install --local pg-*${{ matrix.platform }}.gem --verbose - name: Run specs + if: ${{ matrix.os != 'windows-latest' }} run: ruby -rpg -S rspec -fd spec/**/*_spec.rb + - name: Run specs + if: ${{ matrix.os == 'windows-latest' }} + run: | + ridk enable + ruby -rpg -S rspec -fd spec/**/*_spec.rb - name: Print logs if job failed if: ${{ failure() && matrix.os == 'windows-latest' }} run: | ridk enable find "$(ruby -e"puts RbConfig::CONFIG[%q[libdir]]")" -name mkmf.log -print0 | xargs -0 cat + find -name setup.log -print0 | xargs -0 cat diff --git a/Gemfile b/Gemfile index a3a0ac397..0b5440de4 100644 --- a/Gemfile +++ b/Gemfile @@ -8,10 +8,10 @@ source "https://rubygems.org/" group :development, :test do gem "bundler", ">= 1.16", "< 3.0" gem "rake-compiler", "~> 1.0" - gem "rake-compiler-dock", "~> 1.0" + gem "rake-compiler-dock", "~> 1.5" gem "rdoc", "~> 6.4" gem "rspec", "~> 3.5" - gem "ostruct", "~> 0.5" # for Rakefile.cross + gem "mini_portile2", "~> 2.1" # "bigdecimal" is a gem on ruby-3.4+ and it's optional for ruby-pg. # Specs should succeed without it, but 4 examples are then excluded. # With bigdecimal commented out here, corresponding tests are omitted on ruby-3.4+ but are executed on ruby < 3.4. diff --git a/README-Windows.rdoc b/README-Windows.rdoc index 85d89594a..55f409f26 100644 --- a/README-Windows.rdoc +++ b/README-Windows.rdoc @@ -41,7 +41,7 @@ sure it is started. A native Docker installation is best on Linux. 
Then run: - rake gem:windows + rake gem:native This will download a docker image suited for building windows gems, and it will download and build OpenSSL and PostgreSQL. Finally the gem is built diff --git a/Rakefile b/Rakefile index 6b44a2f83..06d89d49d 100644 --- a/Rakefile +++ b/Rakefile @@ -31,8 +31,8 @@ CLEAN.include( PKGDIR.to_s, TMPDIR.to_s ) CLEAN.include "lib/*/libpq.dll" CLEAN.include "lib/pg_ext.*" CLEAN.include "lib/pg/postgresql_lib_path.rb" - -load 'Rakefile.cross' +CLEAN.include "ports/*.installed" +CLEAN.include "ports/*mingw*", "ports/*linux*" Bundler::GemHelper.install_tasks $gem_spec = Bundler.load_gemspec(GEMSPEC) @@ -42,6 +42,16 @@ task :maint do ENV['MAINTAINER_MODE'] = 'yes' end +CrossLibrary = Struct.new :platform, :openssl_config, :toolchain +CrossLibraries = [ + ['x64-mingw-ucrt', 'mingw64', 'x86_64-w64-mingw32'], + ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], + ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], + ['x86_64-linux-gnu', 'linux-x86_64', 'x86_64-redhat-linux-gnu'], +].map do |platform, openssl_config, toolchain| + CrossLibrary.new platform, openssl_config, toolchain +end + # Rake-compiler task Rake::ExtensionTask.new do |ext| ext.name = 'pg_ext' @@ -50,24 +60,57 @@ Rake::ExtensionTask.new do |ext| ext.lib_dir = 'lib' ext.source_pattern = "*.{c,h}" ext.cross_compile = true - ext.cross_platform = CrossLibraries.map(&:for_platform) + ext.cross_platform = CrossLibraries.map(&:platform) - ext.cross_config_options += CrossLibraries.map do |lib| + ext.cross_config_options += CrossLibraries.map do |xlib| { - lib.for_platform => [ - "--enable-windows-cross", - "--with-pg-include=#{lib.static_postgresql_incdir}", - "--with-pg-lib=#{lib.static_postgresql_libdir}", - # libpq-fe.h resides in src/interfaces/libpq/ before make install - "--with-opt-include=#{lib.static_postgresql_libdir}", + xlib.platform => [ + "--enable-cross-build", + "--with-openssl-platform=#{xlib.openssl_config}", + "--with-toolchain=#{xlib.toolchain}", ] } end - # Add 
libpq.dll to windows binary gemspec + # Add libpq.dll/.so to fat binary gemspecs ext.cross_compiling do |spec| - spec.files << "lib/#{spec.platform}/libpq.dll" + spec.files << "ports/#{spec.platform.to_s}/lib/libpq.so.5" if spec.platform.to_s =~ /linux/ + spec.files << "ports/#{spec.platform.to_s}/lib/libpq.dll" if spec.platform.to_s =~ /mingw|mswin/ + end +end + +task 'gem:native:prepare' do + require 'io/console' + require 'rake_compiler_dock' + + # Copy gem signing key and certs to be accessible from the docker container + mkdir_p 'build/gem' + sh "cp ~/.gem/gem-*.pem build/gem/ || true" + sh "bundle package" + begin + OpenSSL::PKey.read(File.read(File.expand_path("~/.gem/gem-private_key.pem")), ENV["GEM_PRIVATE_KEY_PASSPHRASE"] || "") + rescue OpenSSL::PKey::PKeyError + ENV["GEM_PRIVATE_KEY_PASSPHRASE"] = STDIN.getpass("Enter passphrase of gem signature key: ") + retry + end +end + +CrossLibraries.each do |xlib| + platform = xlib.platform + desc "Build fat binary gem for platform #{platform}" + task "gem:native:#{platform}" => ['gem:native:prepare'] do + RakeCompilerDock.sh <<-EOT, platform: platform + #{ "sudo yum install -y perl-IPC-Cmd bison flex &&" if platform =~ /linux/ } + #{ # remove nm on Linux to suppress PostgreSQL's check for exit which raises thread_exit as a false positive: + "sudo mv `which nm` `which nm`.bak && sudo mv `which nm` `which nm`.bak &&" if platform =~ /linux/ } + #{ "sudo apt-get update && sudo apt-get install -y bison flex &&" if platform =~ /mingw/ } + (cp build/gem/gem-*.pem ~/.gem/ || true) && + bundle install --local && + rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.3.0:3.2.0:3.1.0:3.0.0:2.7.0 + EOT end + desc "Build the native binary gems" + multitask 'gem:native' => "gem:native:#{platform}" end RSpec::Core::RakeTask.new(:spec).rspec_opts = "--profile -cfdoc" diff --git a/Rakefile.cross b/Rakefile.cross deleted file mode 100644 index 82a923d5d..000000000 --- 
a/Rakefile.cross +++ /dev/null @@ -1,291 +0,0 @@ -# -*- rake -*- - -require 'uri' -require 'tempfile' -require 'rbconfig' -require 'rake/clean' -require 'rake/extensiontask' -require 'rake/extensioncompiler' -require 'ostruct' -require_relative 'rakelib/task_extension' - -MISCDIR = BASEDIR + 'misc' - -NUM_CPUS = if File.exist?('/proc/cpuinfo') - File.read('/proc/cpuinfo').scan('processor').length -elsif RUBY_PLATFORM.include?( 'darwin' ) - `system_profiler SPHardwareDataType | grep 'Cores' | awk '{print $5}'`.chomp -else - 1 -end - -class CrossLibrary < OpenStruct - include Rake::DSL - prepend TaskExtension - - def initialize(for_platform, openssl_config, toolchain) - super() - - self.for_platform = for_platform - self.openssl_config = openssl_config - self.host_platform = toolchain - - # Cross-compilation constants - self.openssl_version = ENV['OPENSSL_VERSION'] || '3.4.0' - self.postgresql_version = ENV['POSTGRESQL_VERSION'] || '17.0' - - # Check if symlinks work in the current working directory. - # This fails, if rake-compiler-dock is running on a Windows box. 
- begin - FileUtils.rm_f '.test_symlink' - FileUtils.ln_s '/', '.test_symlink' - rescue NotImplementedError, SystemCallError - # Symlinks don't work -> use home directory instead - self.compile_home = Pathname( "~/.ruby-pg-build" ).expand_path - else - self.compile_home = Pathname( "./build" ).expand_path - end - self.static_sourcesdir = compile_home + 'sources' - self.static_builddir = compile_home + 'builds' + for_platform - CLOBBER.include( static_sourcesdir ) - CLEAN.include( static_builddir ) - - # Static OpenSSL build vars - self.static_openssl_builddir = static_builddir + "openssl-#{openssl_version}" - self.openssl_source_uri = - URI( "https://github.com/openssl/openssl/releases/download/openssl-#{openssl_version}/openssl-#{openssl_version}.tar.gz" ) - self.openssl_tarball = static_sourcesdir + File.basename( openssl_source_uri.path ) - self.openssl_makefile = static_openssl_builddir + 'Makefile' - - self.libssl = static_openssl_builddir + 'libssl.a' - self.libcrypto = static_openssl_builddir + 'libcrypto.a' - - self.openssl_patches = Rake::FileList[ (MISCDIR + "openssl-#{openssl_version}.*.patch").to_s ] - - # Static PostgreSQL build vars - self.static_postgresql_builddir = static_builddir + "postgresql-#{postgresql_version}" - self.postgresql_source_uri = begin - uristring = "http://ftp.postgresql.org/pub/source/v%s/postgresql-%s.tar.bz2" % - [ postgresql_version, postgresql_version ] - URI( uristring ) - end - self.postgresql_tarball = static_sourcesdir + File.basename( postgresql_source_uri.path ) - - self.static_postgresql_srcdir = static_postgresql_builddir + 'src' - self.static_postgresql_libdir = static_postgresql_srcdir + 'interfaces/libpq' - self.static_postgresql_incdir = static_postgresql_srcdir + 'include' - - self.postgresql_global_makefile = static_postgresql_srcdir + 'Makefile.global' - self.postgresql_shlib_makefile = static_postgresql_srcdir + 'Makefile.shlib' - self.postgresql_shlib_mf_orig = static_postgresql_srcdir + 
'Makefile.shlib.orig' - self.postgresql_lib = static_postgresql_libdir + 'libpq.dll' - self.postgresql_patches = Rake::FileList[ (MISCDIR + "postgresql-#{postgresql_version}.*.patch").to_s ] - - # clean intermediate files and folders - CLEAN.include( static_builddir.to_s ) - - ##################################################################### - ### C R O S S - C O M P I L A T I O N - T A S K S - ##################################################################### - - - directory static_sourcesdir.to_s - - # - # Static OpenSSL build tasks - # - directory static_openssl_builddir.to_s - - # openssl source file should be stored there - file openssl_tarball => static_sourcesdir do |t| - download( openssl_source_uri, t.name ) - end - - # Extract the openssl builds - file static_openssl_builddir => openssl_tarball do |t| - puts "extracting %s to %s" % [ openssl_tarball, static_openssl_builddir.parent ] - static_openssl_builddir.mkpath - run 'tar', '-xzf', openssl_tarball.to_s, '-C', static_openssl_builddir.parent.to_s - openssl_makefile.unlink if openssl_makefile.exist? - - openssl_patches.each do |patchfile| - puts " applying patch #{patchfile}..." 
- run 'patch', '-Np1', '-d', static_openssl_builddir.to_s, - '-i', File.expand_path( patchfile, BASEDIR ) - end - end - - self.cmd_prelude = [ - "env", - "CROSS_COMPILE=#{host_platform}-", - "CFLAGS=-DDSO_WIN32 -DOPENSSL_THREADS", - ] - - - # generate the makefile in a clean build location - file openssl_makefile => static_openssl_builddir do |t| - chdir( static_openssl_builddir ) do - cmd = cmd_prelude.dup - cmd << "./Configure" << "threads" << "-static" << openssl_config - - run( *cmd ) - end - end - - desc "compile static openssl libraries" - task "openssl_libs:#{for_platform}" => [ libssl, libcrypto ] - - task "compile_static_openssl:#{for_platform}" => openssl_makefile do |t| - chdir( static_openssl_builddir ) do - cmd = cmd_prelude.dup - cmd << 'make' << "-j#{NUM_CPUS}" << 'build_libs' - - run( *cmd ) - end - end - - desc "compile static #{libssl}" - file libssl => "compile_static_openssl:#{for_platform}" - - desc "compile static #{libcrypto}" - file libcrypto => "compile_static_openssl:#{for_platform}" - - - - # - # Static PostgreSQL build tasks - # - directory static_postgresql_builddir.to_s - - - # postgresql source file should be stored there - file postgresql_tarball => static_sourcesdir do |t| - download( postgresql_source_uri, t.name ) - end - - # Extract the postgresql sources - file static_postgresql_builddir => postgresql_tarball do |t| - puts "extracting %s to %s" % [ postgresql_tarball, static_postgresql_builddir.parent ] - static_postgresql_builddir.mkpath - run 'tar', '-xjf', postgresql_tarball.to_s, '-C', static_postgresql_builddir.parent.to_s - - postgresql_patches.each do |patchfile| - puts " applying patch #{patchfile}..." 
- run 'patch', '-Np1', '-d', static_postgresql_builddir.to_s, - '-i', File.expand_path( patchfile, BASEDIR ) - end - end - - # generate the makefile in a clean build location - file postgresql_global_makefile => [ static_postgresql_builddir, "openssl_libs:#{for_platform}" ] do |t| - options = [ - "--target=#{host_platform}", - "--host=#{host_platform}", - '--with-openssl', - '--without-zlib', - '--without-icu', - ] - - chdir( static_postgresql_builddir ) do - configure_path = static_postgresql_builddir + 'configure' - cmd = [ configure_path.to_s, *options ] - cmd << "CFLAGS=-L#{static_openssl_builddir}" - cmd << "LDFLAGS=-L#{static_openssl_builddir}" - cmd << "LDFLAGS_SL=-L#{static_openssl_builddir}" - cmd << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" - cmd << "CPPFLAGS=-I#{static_openssl_builddir}/include" - - run( *cmd ) - end - end - - - # make libpq.dll - task postgresql_lib => [ postgresql_global_makefile ] do |t| - chdir( postgresql_lib.dirname ) do - sh 'make', - "-j#{NUM_CPUS}", - postgresql_lib.basename.to_s, - 'SHLIB_LINK=-lssl -lcrypto -lcrypt32 -lgdi32 -lsecur32 -lwsock32 -lws2_32' - end - end - - - #desc 'compile libpg.a' - task "native:#{for_platform}" => postgresql_lib - - # copy libpq.dll to lib dir - dest_libpq = "lib/#{for_platform}/#{postgresql_lib.basename}" - directory File.dirname(dest_libpq) - file dest_libpq => [postgresql_lib, File.dirname(dest_libpq)] do - cp postgresql_lib, dest_libpq - end - - stage_libpq = "tmp/#{for_platform}/stage/#{dest_libpq}" - directory File.dirname(stage_libpq) - file stage_libpq => [postgresql_lib, File.dirname(stage_libpq)] do |t| - cp postgresql_lib, stage_libpq - end - end - - def download(url, save_to) - part = save_to+".part" - sh "wget #{url.to_s.inspect} -O #{part.inspect} || curl #{url.to_s.inspect} -o #{part.inspect}" - FileUtils.mv part, save_to - end - - def run(*args) - sh(*args) - end -end - -CrossLibraries = [ - ['x64-mingw-ucrt', 'mingw64', 'x86_64-w64-mingw32'], - ['x86-mingw32', 'mingw', 
'i686-w64-mingw32'], - ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], -].map do |platform, openssl_config, toolchain| - CrossLibrary.new platform, openssl_config, toolchain -end - -desc 'cross compile pg for win32' -task :cross => [ :mingw32 ] - -task :mingw32 do - # Use Rake::ExtensionCompiler helpers to find the proper host - unless Rake::ExtensionCompiler.mingw_host then - warn "You need to install mingw32 cross compile functionality to be able to continue." - warn "Please refer to your distribution/package manager documentation about installation." - fail - end -end - -task 'gem:windows:prepare' do - require 'io/console' - require 'rake_compiler_dock' - - # Copy gem signing key and certs to be accessible from the docker container - mkdir_p 'build/gem' - sh "cp ~/.gem/gem-*.pem build/gem/ || true" - sh "bundle package" - begin - OpenSSL::PKey.read(File.read(File.expand_path("~/.gem/gem-private_key.pem")), ENV["GEM_PRIVATE_KEY_PASSPHRASE"] || "") - rescue OpenSSL::PKey::PKeyError - ENV["GEM_PRIVATE_KEY_PASSPHRASE"] = STDIN.getpass("Enter passphrase of gem signature key: ") - retry - end -end - -CrossLibraries.each do |xlib| - platform = xlib.for_platform - desc "Build fat binary gem for platform #{platform}" - task "gem:windows:#{platform}" => ['gem:windows:prepare', xlib.openssl_tarball, xlib.postgresql_tarball] do - RakeCompilerDock.sh <<-EOT, platform: platform - (cp build/gem/gem-*.pem ~/.gem/ || true) && - sudo apt-get update && sudo apt-get install -y bison flex && - bundle install --local && - rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.3.0:3.2.0:3.1.0:3.0.0:2.7.0 - EOT - end - desc "Build the windows binary gems" - multitask 'gem:windows' => "gem:windows:#{platform}" -end diff --git a/ext/extconf.rb b/ext/extconf.rb index 73f579bc0..088b95f17 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -1,7 +1,6 @@ require 'pp' require 'mkmf' - if ENV['MAINTAINER_MODE'] $stderr.puts "Maintainer mode 
enabled." $CFLAGS << @@ -24,12 +23,117 @@ $stderr.puts "Calling libpq with GVL locked" end -if enable_config("windows-cross") +if enable_config("cross-build") + gem 'mini_portile2', '~>2.1' + require 'mini_portile2' + + OPENSSL_VERSION = ENV['OPENSSL_VERSION'] || '3.4.0' + OPENSSL_SOURCE_URI = "http://www.openssl.org/source/openssl-#{OPENSSL_VERSION}.tar.gz" + + KRB5_VERSION = ENV['KRB5_VERSION'] || '1.21.3' + KRB5_SOURCE_URI = "http://kerberos.org/dist/krb5/#{KRB5_VERSION[/^(\d+\.\d+)/]}/krb5-#{KRB5_VERSION}.tar.gz" + + POSTGRESQL_VERSION = ENV['POSTGRESQL_VERSION'] || '17.2' + POSTGRESQL_SOURCE_URI = "http://ftp.postgresql.org/pub/source/v#{POSTGRESQL_VERSION}/postgresql-#{POSTGRESQL_VERSION}.tar.bz2" + + class BuildRecipe < MiniPortile + def initialize(name, version, files) + super(name, version) + self.files = files + rootdir = File.expand_path('../..', __FILE__) + self.target = File.join(rootdir, "ports") + self.patch_files = Dir[File.join(target, "patches", self.name, self.version, "*.patch")].sort + end + + def port_path + "#{target}/#{RUBY_PLATFORM}" + end + + def cook_and_activate + checkpoint = File.join(self.target, "#{self.name}-#{self.version}-#{RUBY_PLATFORM}.installed") + unless File.exist?(checkpoint) + self.cook + FileUtils.touch checkpoint + end + self.activate + self + end + end + + openssl_platform = with_config("openssl-platform") + toolchain = with_config("toolchain") + + openssl_recipe = BuildRecipe.new("openssl", OPENSSL_VERSION, [OPENSSL_SOURCE_URI]).tap do |recipe| + class << recipe + attr_accessor :openssl_platform + def configure + envs = [] + envs << "CFLAGS=-DDSO_WIN32 -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /mingw|mswin/ + envs << "CFLAGS=-fPIC -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /linux/ + execute('configure', ['env', *envs, "./Configure", openssl_platform, "threads", "-static", "CROSS_COMPILE=#{host}-", configure_prefix], altlog: "config.log") + end + def compile + execute('compile', "#{make_cmd} build_libs") + end + def install + 
execute('install', "#{make_cmd} install_dev") + end + end + + recipe.openssl_platform = openssl_platform + recipe.host = toolchain + recipe.cook_and_activate + end + + if RUBY_PLATFORM =~ /linux/ + krb5_recipe = BuildRecipe.new("krb5", KRB5_VERSION, [KRB5_SOURCE_URI]).tap do |recipe| + class << recipe + def work_path + File.join(super, "src") + end + end + # We specify -fcommon to get around duplicate definition errors in recent gcc. + # See https://github.com/cockroachdb/cockroach/issues/49734 + recipe.configure_options << "CFLAGS=-fcommon#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "--without-keyutils" + recipe.host = toolchain + recipe.cook_and_activate + end + end + + postgresql_recipe = BuildRecipe.new("postgresql", POSTGRESQL_VERSION, [POSTGRESQL_SOURCE_URI]).tap do |recipe| + class << recipe + def configure_defaults + [ + "--target=#{host}", + "--host=#{host}", + '--with-openssl', + *(RUBY_PLATFORM=~/linux/ ? ['--with-gssapi'] : []), + '--without-zlib', + '--without-icu', + ] + end + def compile + execute 'compile include', "#{make_cmd} -C src/include install" + execute 'compile interfaces', "#{make_cmd} -C src/interfaces install" + end + def install + end + end + + recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 #{"-lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ + recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ + recipe.configure_options << "CPPFLAGS=-I#{openssl_recipe.path}/include" + recipe.host = toolchain + recipe.cook_and_activate + end + # Avoid dependency to external libgcc.dll on x86-mingw32 $LDFLAGS << " -static-libgcc" # Don't use pg_config for cross build, but --with-pg-* path options - 
dir_config 'pg' - + dir_config('pg', "#{postgresql_recipe.path}/include", "#{postgresql_recipe.path}/lib") else # Native build diff --git a/lib/pg.rb b/lib/pg.rb index 45b58d0ee..5e032ad86 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -6,11 +6,10 @@ module PG # Is this file part of a fat binary gem with bundled libpq? - bundled_libpq_path = File.join(__dir__, RUBY_PLATFORM.gsub(/^i386-/, "x86-")) - if File.exist?(bundled_libpq_path) + bundled_libpq_path = Dir[File.expand_path("../ports/#{RUBY_PLATFORM.gsub(/^i386-/, "x86-")}*/lib", __dir__)].first + if bundled_libpq_path POSTGRESQL_LIB_PATH = bundled_libpq_path else - bundled_libpq_path = nil # Try to load libpq path as found by extconf.rb begin require "pg/postgresql_lib_path" @@ -23,6 +22,7 @@ module PG add_dll_path = proc do |path, &block| if RUBY_PLATFORM =~/(mswin|mingw)/i && path && File.exist?(path) + BUNDLED_LIBPQ_WITH_UNIXSOCKET = false begin require 'ruby_installer/runtime' RubyInstaller::Runtime.add_dll_directory(path, &block) @@ -32,7 +32,17 @@ module PG block.call ENV['PATH'] = old_path end + elsif RUBY_PLATFORM =~/(linux)/i && bundled_libpq_path && File.exist?(bundled_libpq_path) + BUNDLED_LIBPQ_WITH_UNIXSOCKET = true + + # Load dependent libpq.so into the process, so that it is already present, + # when pg_ext.so is loaded. + # This ensures, that the shared library is loaded when the path is different between build and run time (e.g. fat binary gems). + require 'fiddle' + Fiddle.dlopen(File.join(bundled_libpq_path, "libpq.so.5")) + block.call else + BUNDLED_LIBPQ_WITH_UNIXSOCKET = false # No need to set a load path manually - it's set as library rpath. block.call end @@ -40,12 +50,12 @@ module PG # Add a load path to the one retrieved from pg_config add_dll_path.call(POSTGRESQL_LIB_PATH) do - if bundled_libpq_path - # It's a Windows binary gem, try the . subdirectory + begin + # Try the . 
subdirectory for fat binary gems major_minor = RUBY_VERSION[ /^(\d+\.\d+)/ ] or raise "Oops, can't extract the major/minor version from #{RUBY_VERSION.dump}" require "#{major_minor}/pg_ext" - else + rescue LoadError require 'pg_ext' end end diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index a9d901801..5b464013d 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -857,6 +857,14 @@ def new(*args) iopts = PG::Connection.conninfo_parse(option_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } iopts = PG::Connection.conndefaults.each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }.merge(iopts) + if PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET && iopts[:host].to_s.empty? + # Many distors patch the hardcoded default UnixSocket path in libpq to /var/run/postgresql instead of /tmp . + # We simply try them all. + iopts[:host] = "/var/run/postgresql" # Ubuntu, Debian, Fedora, Opensuse + ",/run/postgresql" # Alpine, Archlinux, Gentoo + ",/tmp" # Stock PostgreSQL + end + iopts_for_reset = iopts if iopts[:hostaddr] # hostaddr is provided -> no need to resolve hostnames From bc0eac41cd939768d366075c9ddae579339383e7 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 28 Oct 2024 11:08:23 +0100 Subject: [PATCH 037/118] Use rpath instead of fiddle for loading of libpq.so Fiddle will probably be removed from ruby stdlibs, so using a alternative approach seems necessary. 
--- Rakefile | 4 ++-- ext/extconf.rb | 10 ++++++++-- lib/pg.rb | 7 +------ 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Rakefile b/Rakefile index 06d89d49d..9f5faf9e6 100644 --- a/Rakefile +++ b/Rakefile @@ -65,7 +65,7 @@ Rake::ExtensionTask.new do |ext| ext.cross_config_options += CrossLibraries.map do |xlib| { xlib.platform => [ - "--enable-cross-build", + "--with-cross-build=#{xlib.platform}", "--with-openssl-platform=#{xlib.openssl_config}", "--with-toolchain=#{xlib.toolchain}", ] @@ -74,7 +74,7 @@ Rake::ExtensionTask.new do |ext| # Add libpq.dll/.so to fat binary gemspecs ext.cross_compiling do |spec| - spec.files << "ports/#{spec.platform.to_s}/lib/libpq.so.5" if spec.platform.to_s =~ /linux/ + spec.files << "ports/#{spec.platform.to_s}/lib/libpq-ruby-pg.so.1" if spec.platform.to_s =~ /linux/ spec.files << "ports/#{spec.platform.to_s}/lib/libpq.dll" if spec.platform.to_s =~ /mingw|mswin/ end end diff --git a/ext/extconf.rb b/ext/extconf.rb index 088b95f17..cec9db7b2 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -23,7 +23,7 @@ $stderr.puts "Calling libpq with GVL locked" end -if enable_config("cross-build") +if gem_platform=with_config("cross-build") gem 'mini_portile2', '~>2.1' require 'mini_portile2' @@ -122,7 +122,7 @@ def install end recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" - recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 #{"-lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 #{"-Wl,-soname,libpq-ruby-pg.so.1 -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/}" recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ 
recipe.configure_options << "CPPFLAGS=-I#{openssl_recipe.path}/include" @@ -130,8 +130,14 @@ def install recipe.cook_and_activate end + # Use our own library name for libpq to avoid loading of system libpq by accident. + FileUtils.ln_sf File.join(postgresql_recipe.port_path, "lib/libpq.so.5"), + File.join(postgresql_recipe.port_path, "lib/libpq-ruby-pg.so.1") # Avoid dependency to external libgcc.dll on x86-mingw32 $LDFLAGS << " -static-libgcc" + # Find libpq in the ports directory coming from lib/3.3 + # It is shared between all compiled ruby versions. + $LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" # Don't use pg_config for cross build, but --with-pg-* path options dir_config('pg', "#{postgresql_recipe.path}/include", "#{postgresql_recipe.path}/lib") else diff --git a/lib/pg.rb b/lib/pg.rb index 5e032ad86..fd82f3646 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -34,12 +34,7 @@ module PG end elsif RUBY_PLATFORM =~/(linux)/i && bundled_libpq_path && File.exist?(bundled_libpq_path) BUNDLED_LIBPQ_WITH_UNIXSOCKET = true - - # Load dependent libpq.so into the process, so that it is already present, - # when pg_ext.so is loaded. - # This ensures, that the shared library is loaded when the path is different between build and run time (e.g. fat binary gems). 
- require 'fiddle' - Fiddle.dlopen(File.join(bundled_libpq_path, "libpq.so.5")) + # libpq is found by a relative rpath in the cross compiled extension dll block.call else BUNDLED_LIBPQ_WITH_UNIXSOCKET = false From 54460a7228808cc49c9705786316f1dc3c51b8dc Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Wed, 30 Oct 2024 21:41:34 +0100 Subject: [PATCH 038/118] CI: Add test run on Fedora and Alpine --- .github/workflows/binary-gems.yml | 38 +++++++++++++++++++++++++++---- Gemfile | 9 +++++--- Rakefile | 2 +- ext/extconf.rb | 2 ++ ext/pg.c | 6 +++++ lib/pg.rb | 15 ++++++------ lib/pg/connection.rb | 4 ++-- spec/env/Dockerfile.alpine | 27 ++++++++++++++++++++++ spec/env/Dockerfile.centos | 27 ++++++++++++++++++++++ spec/pg/connection_spec.rb | 2 +- 10 files changed, 113 insertions(+), 19 deletions(-) create mode 100644 spec/env/Dockerfile.alpine create mode 100644 spec/env/Dockerfile.centos diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 69c75bc81..4779569d5 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -8,7 +8,7 @@ on: - cron: "0 5 * * 3" # At 05:00 on Wednesday # https://crontab.guru/#0_5_*_*_3 jobs: - job_build_x64: + rcd_build: name: Build runs-on: ubuntu-latest strategy: @@ -18,7 +18,7 @@ jobs: - platform: "x64-mingw-ucrt" - platform: "x64-mingw32" - platform: "x86-mingw32" - - platform: "x86_64-linux-gnu" + - platform: "x86_64-linux" steps: - uses: actions/checkout@v4 - name: Set up Ruby @@ -45,7 +45,7 @@ jobs: job_test_binary: name: Test - needs: job_build_x64 + needs: rcd_build strategy: fail-fast: false matrix: @@ -63,7 +63,7 @@ jobs: PGVERSION: 16.6-1-windows-x64 - os: ubuntu-latest ruby: "3.2" - platform: "x86_64-linux-gnu" + platform: "x86_64-linux" runs-on: ${{ matrix.os }} env: @@ -132,3 +132,33 @@ jobs: ridk enable find "$(ruby -e"puts RbConfig::CONFIG[%q[libdir]]")" -name mkmf.log -print0 | xargs -0 cat find -name setup.log -print0 | xargs -0 cat + + + 
job_binary_multiarch: + name: multiarch (${{matrix.platform}} on ${{matrix.from_image}} ${{matrix.image_platform}}) + needs: rcd_build + strategy: + fail-fast: false + matrix: + include: + - from_image: fedora:39 + image_platform: linux/x86_64 + gem_platform: x86_64-linux + dockerfile: centos + - from_image: alpine + image_platform: linux/x86_64 + gem_platform: x86_64-linux + dockerfile: alpine + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Download gem-${{ matrix.gem_platform }} + uses: actions/download-artifact@v4 + with: + name: binary-gem-${{ matrix.gem_platform }} + - name: Build image and Run tests + run: | + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + docker build --rm --platform ${{matrix.image_platform}} --build-arg from_image=${{matrix.from_image}} -t ruby-test -f spec/env/Dockerfile.${{matrix.dockerfile}} . + docker run --rm -t --network=host -v `pwd`:/build ruby-test diff --git a/Gemfile b/Gemfile index 0b5440de4..81dfc52cc 100644 --- a/Gemfile +++ b/Gemfile @@ -5,13 +5,16 @@ gemspec source "https://rubygems.org/" -group :development, :test do +group :development do + gem "rdoc", "~> 6.4" + gem "mini_portile2", "~> 2.1" +end + +group :test do gem "bundler", ">= 1.16", "< 3.0" gem "rake-compiler", "~> 1.0" gem "rake-compiler-dock", "~> 1.5" - gem "rdoc", "~> 6.4" gem "rspec", "~> 3.5" - gem "mini_portile2", "~> 2.1" # "bigdecimal" is a gem on ruby-3.4+ and it's optional for ruby-pg. # Specs should succeed without it, but 4 examples are then excluded. # With bigdecimal commented out here, corresponding tests are omitted on ruby-3.4+ but are executed on ruby < 3.4. 
diff --git a/Rakefile b/Rakefile index 9f5faf9e6..56ab281d5 100644 --- a/Rakefile +++ b/Rakefile @@ -47,7 +47,7 @@ CrossLibraries = [ ['x64-mingw-ucrt', 'mingw64', 'x86_64-w64-mingw32'], ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], - ['x86_64-linux-gnu', 'linux-x86_64', 'x86_64-redhat-linux-gnu'], + ['x86_64-linux', 'linux-x86_64', 'x86_64-redhat-linux-gnu'], ].map do |platform, openssl_config, toolchain| CrossLibrary.new platform, openssl_config, toolchain end diff --git a/ext/extconf.rb b/ext/extconf.rb index cec9db7b2..ed81b5123 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -140,6 +140,8 @@ def install $LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" # Don't use pg_config for cross build, but --with-pg-* path options dir_config('pg', "#{postgresql_recipe.path}/include", "#{postgresql_recipe.path}/lib") + + $defs.push( "-DPG_IS_BINARY_GEM") else # Native build diff --git a/ext/pg.c b/ext/pg.c index 86a88366f..db6d5cbfa 100644 --- a/ext/pg.c +++ b/ext/pg.c @@ -671,6 +671,12 @@ Init_pg_ext(void) /* PostgreSQL compiled in default port */ rb_define_const(rb_mPGconstants, "DEF_PGPORT", INT2FIX(DEF_PGPORT)); +#ifdef PG_IS_BINARY_GEM + rb_define_const(rb_mPG, "IS_BINARY_GEM", Qtrue); +#else + rb_define_const(rb_mPG, "IS_BINARY_GEM", Qfalse); +#endif + /* Add the constants to the toplevel namespace */ rb_include_module( rb_mPG, rb_mPGconstants ); diff --git a/lib/pg.rb b/lib/pg.rb index fd82f3646..73f2f894c 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -6,7 +6,9 @@ module PG # Is this file part of a fat binary gem with bundled libpq? - bundled_libpq_path = Dir[File.expand_path("../ports/#{RUBY_PLATFORM.gsub(/^i386-/, "x86-")}*/lib", __dir__)].first + # This path must be enabled by add_dll_directory on Windows. 
+ gplat = Gem::Platform.local + bundled_libpq_path = Dir[File.expand_path("../ports/#{gplat.cpu}-#{gplat.os}*/lib", __dir__)].first if bundled_libpq_path POSTGRESQL_LIB_PATH = bundled_libpq_path else @@ -21,7 +23,7 @@ module PG end add_dll_path = proc do |path, &block| - if RUBY_PLATFORM =~/(mswin|mingw)/i && path && File.exist?(path) + if RUBY_PLATFORM =~/(mswin|mingw)/i && path BUNDLED_LIBPQ_WITH_UNIXSOCKET = false begin require 'ruby_installer/runtime' @@ -32,14 +34,11 @@ module PG block.call ENV['PATH'] = old_path end - elsif RUBY_PLATFORM =~/(linux)/i && bundled_libpq_path && File.exist?(bundled_libpq_path) - BUNDLED_LIBPQ_WITH_UNIXSOCKET = true - # libpq is found by a relative rpath in the cross compiled extension dll - block.call else - BUNDLED_LIBPQ_WITH_UNIXSOCKET = false - # No need to set a load path manually - it's set as library rpath. + # libpq is found by a relative rpath in the cross compiled extension dll + # or by the system library loader block.call + BUNDLED_LIBPQ_WITH_UNIXSOCKET = RUBY_PLATFORM=~/linux/i && PG::IS_BINARY_GEM end end diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 5b464013d..2c9ecd8c7 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -860,8 +860,8 @@ def new(*args) if PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET && iopts[:host].to_s.empty? # Many distors patch the hardcoded default UnixSocket path in libpq to /var/run/postgresql instead of /tmp . # We simply try them all. 
- iopts[:host] = "/var/run/postgresql" # Ubuntu, Debian, Fedora, Opensuse - ",/run/postgresql" # Alpine, Archlinux, Gentoo + iopts[:host] = "/var/run/postgresql" + # Ubuntu, Debian, Fedora, Opensuse + ",/run/postgresql" + # Alpine, Archlinux, Gentoo ",/tmp" # Stock PostgreSQL end diff --git a/spec/env/Dockerfile.alpine b/spec/env/Dockerfile.alpine new file mode 100644 index 000000000..f1268d491 --- /dev/null +++ b/spec/env/Dockerfile.alpine @@ -0,0 +1,27 @@ +ARG from_image +FROM ${from_image} + +RUN uname -a +RUN apk add ruby ruby-etc ruby-rake ruby-dev git gcc make musl-dev gcompat postgresql16 sudo + +RUN git config --global --add safe.directory /build +RUN ruby --version +RUN ruby -e 'puts File.read("/proc/#{Process.pid}/maps")' +RUN gem env +RUN gem inst bundler --conservative +RUN gem list +RUN sudo -u postgres initdb -D /var/lib/postgresql/ && \ + mkdir -p /run/postgresql && \ + chown postgres /run/postgresql + +WORKDIR /build + +CMD ruby -v && \ + ruby -e "puts Gem::Platform.local.to_s" && \ + rm /var/run && sudo -u postgres pg_ctl -D /var/lib/postgresql/ start && \ + chmod -R ugo+wrX . 
&& \ + gem install --local *.gem --verbose --no-document && \ + bundle config set --local without 'development' && \ + bundle install && \ + sudo -u postgres ruby -rpg -e "p RUBY_DESCRIPTION, PG::VERSION, PG::POSTGRESQL_LIB_PATH, PG::IS_BINARY_GEM, PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET; puts PG.connect.exec('SELECT version()').values; p PG.connect.host" && \ + sudo -u postgres ruby -rpg -S rspec -fd spec/**/*_spec.rb diff --git a/spec/env/Dockerfile.centos b/spec/env/Dockerfile.centos new file mode 100644 index 000000000..bd09a2abb --- /dev/null +++ b/spec/env/Dockerfile.centos @@ -0,0 +1,27 @@ +ARG from_image +FROM ${from_image} + +RUN uname -a +RUN yum install -y ruby-devel rake git gcc make redhat-rpm-config postgresql-server + +RUN git config --global --add safe.directory /build +RUN ruby --version +RUN ruby -e 'puts File.read("/proc/#{Process.pid}/maps")' +RUN gem env +RUN gem inst bundler --conservative +RUN gem list +RUN sudo -u postgres initdb -D /var/lib/pgsql/data && \ + mkdir -p /run/postgresql && \ + chown postgres /run/postgresql + +WORKDIR /build + +CMD ruby -v && \ + ruby -e "puts Gem::Platform.local.to_s" && \ + sudo -u postgres pg_ctl -D /var/lib/pgsql/data start && \ + chmod -R ugo+wrX . 
&& \ + gem install --local *.gem --verbose --no-document && \ + bundle config set --local without 'development' && \ + bundle install && \ + sudo -u postgres ruby -rpg -e "p RUBY_DESCRIPTION, PG::VERSION, PG::POSTGRESQL_LIB_PATH, PG::IS_BINARY_GEM, PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET; puts PG.connect.exec('SELECT version()').values; p PG.connect.host" && \ + sudo -u postgres ruby -rpg -S rspec -fd spec/**/*_spec.rb diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index f8f1571ad..63d3585ad 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -698,7 +698,7 @@ end it "rejects to send lots of COPY data" do - unless RUBY_PLATFORM =~ /i386-mingw|x86_64-darwin|x86_64-linux/ + unless RUBY_PLATFORM =~ /i386-mingw|x86_64-darwin|x86_64-linux$/ skip "this spec depends on out-of-memory condition in put_copy_data, which is not reliable on all platforms" end From 97d071819b8afa23452b490b419cbdf94bc600cc Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Wed, 27 Nov 2024 20:47:36 +0100 Subject: [PATCH 039/118] Try to decrease the number of CI failures on Macos --- spec/pg/result_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/pg/result_spec.rb b/spec/pg/result_spec.rb index d34a07996..6fc460bc7 100644 --- a/spec/pg/result_spec.rb +++ b/spec/pg/result_spec.rb @@ -296,7 +296,7 @@ it "raises an error if there is a timeout during streaming" do @conn.exec( "SET local statement_timeout = 20" ) - @conn.send_query( "SELECT 1, true UNION ALL SELECT 2, (pg_sleep(0.1) IS NULL)" ) + @conn.send_query( "SELECT 1, true UNION ALL SELECT 2, (pg_sleep(0.3) IS NULL)" ) @conn.send(*row_mode) expect{ @conn.get_result.stream_each_row do |row| From c6d1047b419e87e57aaec076d9338e80f9bd269a Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 28 Nov 2024 10:43:23 +0100 Subject: [PATCH 040/118] Bump VERSION to 1.6.0.rc1 and add release notes --- History.md | 26 ++++++++++++++++++++++++++ lib/pg/version.rb | 2 +- 2 files changed, 27 
insertions(+), 1 deletion(-) diff --git a/History.md b/History.md index d83135469..5dd3dff09 100644 --- a/History.md +++ b/History.md @@ -1,3 +1,29 @@ +## v1.6.0.rc1 [2024-11-28] Lars Kanis + +Added: + +- Add fat binary gem for platform `x86_64-linux`. + [#551](https://github.com/ged/ruby-pg/pull/551#issuecomment-2504715762) +- Add PG::BinaryDecoder::Array and PG::BinaryEncoder::Array to parse and encode PostgreSQL arrays in binary format. + [#603](https://github.com/ged/ruby-pg/pull/603) +- Add support for new query cancel functions of PostgreSQL-17. + This adds the new class `PG::CancelConnection` which provides the ability to cancel a query per blocking or per non-blocking functions. + If the new functions are available they are used and the older are no longer compiled in. + This way we can get rid of reading out the internal `PGcancel` struct by `Connection#backend_key`. + [#614](https://github.com/ged/ruby-pg/pull/614) +- Add Connection#set_chunked_rows_mode [#610](https://github.com/ged/ruby-pg/pull/610) +- Add PG::Connection#close_prepared, PG::Connection#close_portal, PG::Connection#send_close_prepared and PG::Connection#send_close_portal which are new in PostgreSQL-17. + [#611](https://github.com/ged/ruby-pg/pull/611) +- Add Connection#send_pipeline_sync, async_pipeline_sync and release GVL at PQ(sendP|P)ipelineSync. + [#612](https://github.com/ged/ruby-pg/pull/612) + +Removed: + +- Drop support of Ruby < 2.7 [#606](https://github.com/ged/ruby-pg/pull/606) +- Drop support of PostgreSQL < 10 [#606](https://github.com/ged/ruby-pg/pull/606) +- Remove workaround for Truffleruby < 21.3.0 [#613](https://github.com/ged/ruby-pg/pull/613) + + ## v1.5.9 [2024-10-24] Lars Kanis - Enable thread safety in static OpenSSL build for Windows. 
[#595](https://github.com/ged/ruby-pg/pull/595) diff --git a/lib/pg/version.rb b/lib/pg/version.rb index 0af7e1f5f..9accb0791 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,4 +1,4 @@ module PG # Library version - VERSION = '1.5.9' + VERSION = '1.6.0.rc1' end From fba32e451beb1e5d4e978e93ddc80f18ac4b940f Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 28 Nov 2024 15:13:15 +0100 Subject: [PATCH 041/118] Add patch and test job for starvation on bigger SSL records --- .github/workflows/binary-gems.yml | 23 ++++++++ misc/yugabyte/Dockerfile | 9 ++++ misc/yugabyte/docker-compose.yml | 28 ++++++++++ misc/yugabyte/pg-test.rb | 45 ++++++++++++++++ ...ffered-SSL-read-bytes-to-support-rec.patch | 52 +++++++++++++++++++ 5 files changed, 157 insertions(+) create mode 100644 misc/yugabyte/Dockerfile create mode 100644 misc/yugabyte/docker-compose.yml create mode 100644 misc/yugabyte/pg-test.rb create mode 100644 ports/patches/postgresql/17.2/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 4779569d5..e3a48cc7e 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -162,3 +162,26 @@ jobs: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes docker build --rm --platform ${{matrix.image_platform}} --build-arg from_image=${{matrix.from_image}} -t ruby-test -f spec/env/Dockerfile.${{matrix.dockerfile}} . 
docker run --rm -t --network=host -v `pwd`:/build ruby-test + + job_binary_yugabyte: + name: yugabyte (${{matrix.gem_platform}} + needs: rcd_build + strategy: + fail-fast: false + matrix: + include: + - gem_platform: x86_64-linux + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Download gem-${{ matrix.gem_platform }} + uses: actions/download-artifact@v4 + with: + name: binary-gem-${{ matrix.gem_platform }} + - name: Build image and Run tests + run: | + sudo apt-get install -y docker-compose + cp -v pg-*.gem misc/yugabyte/ + cd misc/yugabyte + docker-compose up --abort-on-container-exit --exit-code-from pg diff --git a/misc/yugabyte/Dockerfile b/misc/yugabyte/Dockerfile new file mode 100644 index 000000000..330215241 --- /dev/null +++ b/misc/yugabyte/Dockerfile @@ -0,0 +1,9 @@ +FROM yugabytedb/yugabyte:2024.1.0.0-b129 + +WORKDIR /app + +RUN yugabyted cert generate_server_certs --hostnames=127.0.0.1 --base_dir=. + +ENTRYPOINT ["yugabyted"] +CMD ["start", "--background", "false", "--ui", "false", "--tserver_flags", "use_client_to_server_encryption=true,cert_node_filename=127.0.0.1,certs_dir=/app/generated_certs/127.0.0.1"] +VOLUME /app diff --git a/misc/yugabyte/docker-compose.yml b/misc/yugabyte/docker-compose.yml new file mode 100644 index 000000000..03d4560cc --- /dev/null +++ b/misc/yugabyte/docker-compose.yml @@ -0,0 +1,28 @@ +services: + yb: + build: . 
+ container_name: yb + ports: + - "127.0.0.1:5433:5433" + volumes: + - certs:/app/generated_certs + healthcheck: + test: 'ysqlsh -h $$(hostname) -c \\conninfo || exit 1;' + interval: 2s + timeout: 30s + retries: 20 + start_period: 10s + + pg: + image: ruby:3.0 + working_dir: /app + volumes: + - .:/app + - certs:/generated_certs + command: bash -c "gem inst pg-*.gem && ruby pg-test.rb" + depends_on: + yb: + condition: service_healthy + +volumes: + certs: diff --git a/misc/yugabyte/pg-test.rb b/misc/yugabyte/pg-test.rb new file mode 100644 index 000000000..d292537da --- /dev/null +++ b/misc/yugabyte/pg-test.rb @@ -0,0 +1,45 @@ +require 'pg' + +conn = PG.connect( + host: 'yb', + port: 5433, + user: 'yugabyte', + dbname: 'yugabyte', + sslmode: 'require', + sslrootcert: 'app/generated_certs/127.0.0.1/ca.crt', + sslcert: 'app/generated_certs/127.0.0.1/node.127.0.0.1.crt', + sslkey: 'app/generated_certs/127.0.0.1/node.127.0.0.1.key' +) + +$stdout.sync = true +# fd = File.open("pg_trace.log", "a+") +# conn.trace(fd) + +begin + # Validate connection is working + res = conn.exec("SELECT version();") + res.each_row do |row| + puts "You are connected to: #{row[0]}" + end +# 53*511 +# 53*767 +# 53*1023 +# 53*1279 +# 7*1817 +# 11*1487 +# 13*1363 +# 16*1211 +# 18*1128 +# 22*1984 +# 27*1723 + + (22..53).each do |m| + (1..2048).each do |l| + hanging_query = "SELECT lpad(''::text, #{m}, '0') FROM generate_series(1, #{l});" + puts "Executing hanging query: #{hanging_query}" + conn.exec(hanging_query) + end + end +ensure + conn.close if conn +end diff --git a/ports/patches/postgresql/17.2/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch b/ports/patches/postgresql/17.2/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch new file mode 100644 index 000000000..3d6818132 --- /dev/null +++ b/ports/patches/postgresql/17.2/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch @@ -0,0 +1,52 @@ +From ab793829a4ce473f1cc2bbc0e2a6f6753553255d Mon Sep 17 
00:00:00 2001 +From: Lars Kanis +Date: Sun, 8 Sep 2024 13:59:05 +0200 +Subject: [PATCH] libpq: Process buffered SSL read bytes to support records + >8kB on async API + +The async API of libpq doesn't support SSL record sizes >8kB so far. +This size isn't exceeded by vanilla PostgreSQL, but by other products using +the postgres wire protocol 3. +PQconsumeInput() reads all data readable from the socket, so that the read +condition is cleared. +But it doesn't process all the data that is pending on the SSL layer. +Also a subsequent call to PQisBusy() doesn't process it, so that the client +is triggered to wait for more readable data on the socket. +But this never arrives, so that the connection blocks infinitely. + +To fix this issue call pqReadData() repeatedly until there is no buffered +SSL data left to be read. + +The synchronous libpq API isn't affected, since it supports arbitrary SSL +record sizes already. +--- + src/interfaces/libpq/fe-exec.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c +index 0d224a852..637894ee1 100644 +--- a/src/interfaces/libpq/fe-exec.c ++++ b/src/interfaces/libpq/fe-exec.c +@@ -2006,6 +2006,19 @@ PQconsumeInput(PGconn *conn) + if (pqReadData(conn) < 0) + return 0; + ++ #ifdef USE_SSL ++ /* ++ * Ensure all buffered read bytes in the SSL library are processed, ++ * which might be not the case, if the SSL record size exceeds 8k. ++ * Otherwise parseInput can't process the data. ++ */ ++ while (conn->ssl_in_use && pgtls_read_pending(conn)) ++ { ++ if (pqReadData(conn) < 0) ++ return 0; ++ } ++ #endif ++ + /* Parsing of the data waits till later. 
*/ + return 1; + } +-- +2.43.0 + From a974f31965ee83aa873e703a78537b4a70537772 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 10 Jan 2025 09:49:55 +0100 Subject: [PATCH 042/118] Add possibility to define the number of array dimensions to be encoded Setting dimensions is especially useful, when a Record shall be encoded into an Array, since the Array encoder can not distinguish if the array shall be encoded as a higher dimension or as a record otherwise. Related to #620 --- ext/pg.h | 1 + ext/pg_binary_encoder.c | 9 +++- ext/pg_coder.c | 49 +++++++++++++++++++++ ext/pg_text_encoder.c | 23 +++++++--- lib/pg/coder.rb | 3 +- spec/pg/type_spec.rb | 98 ++++++++++++++++++++++++++++++++++++++++- 6 files changed, 175 insertions(+), 8 deletions(-) diff --git a/ext/pg.h b/ext/pg.h index 93ef0e466..165c2c7ee 100644 --- a/ext/pg.h +++ b/ext/pg.h @@ -206,6 +206,7 @@ typedef struct { t_pg_coder comp; t_pg_coder *elem; int needs_quotation; + int dimensions; char delimiter; } t_pg_composite_coder; diff --git a/ext/pg_binary_encoder.c b/ext/pg_binary_encoder.c index 601bd333e..f887d1d04 100644 --- a/ext/pg_binary_encoder.c +++ b/ext/pg_binary_encoder.c @@ -320,6 +320,9 @@ pg_bin_enc_date(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate, i * This encoder expects an Array of values or sub-arrays as input. * Other values are passed through as byte string without interpretation. * + * It is possible to enforce a number of dimensions to be encoded by #dimensions= . + * Deeper nested arrays are then passed to the elements encoder and less nested arrays raise an ArgumentError. + * * The accessors needs_quotation and delimiter are ignored for binary encoding. 
* */ @@ -346,7 +349,8 @@ pg_bin_enc_array(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, dim_sizes[ndim-1] = RARRAY_LENINT(el1); nitems *= dim_sizes[ndim-1]; el2 = rb_ary_entry(el1, 0); - if (TYPE(el2) == T_ARRAY) { + if ( (this->dimensions < 0 || ndim < this->dimensions) && + TYPE(el2) == T_ARRAY) { ndim++; if (ndim > MAXDIM) rb_raise( rb_eArgError, "unsupported number of array dimensions: >%d", ndim ); @@ -356,6 +360,9 @@ pg_bin_enc_array(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, el1 = el2; } } + if( this->dimensions >= 0 && (ndim==0 ? 1 : ndim) != this->dimensions ){ + rb_raise(rb_eArgError, "less array dimensions to encode (%d) than expected (%d)", ndim, this->dimensions); + } if(out){ /* Second encoder pass -> write data to `out` */ diff --git a/ext/pg_coder.c b/ext/pg_coder.c index ef98dee16..f6c252fe9 100644 --- a/ext/pg_coder.c +++ b/ext/pg_coder.c @@ -135,6 +135,7 @@ pg_composite_encoder_allocate( VALUE klass ) this->elem = NULL; this->needs_quotation = 1; this->delimiter = ','; + this->dimensions = -1; rb_iv_set( self, "@elements_type", Qnil ); return self; } @@ -157,6 +158,7 @@ pg_composite_decoder_allocate( VALUE klass ) this->elem = NULL; this->needs_quotation = 1; this->delimiter = ','; + this->dimensions = -1; rb_iv_set( self, "@elements_type", Qnil ); return self; } @@ -421,6 +423,49 @@ pg_coder_delimiter_get(VALUE self) return rb_str_new(&this->delimiter, 1); } +/* + * call-seq: + * coder.dimensions = Integer + * coder.dimensions = nil + * + * Set number of array dimensions to be encoded. + * + * This property ensures, that this number of dimensions is always encoded. + * If less dimensions than this number are in the given value, an ArgumentError is raised. + * If more dimensions than this number are in the value, the Array value is passed to the next encoder. 
+ * + * Setting dimensions is especially useful, when a Record shall be encoded into an Array, since the Array encoder can not distinguish if the array shall be encoded as a higher dimension or as a record otherwise. + * + * The default is +nil+. + * + * See #dimensions + */ +static VALUE +pg_coder_dimensions_set(VALUE self, VALUE dimensions) +{ + t_pg_composite_coder *this = RTYPEDDATA_DATA(self); + rb_check_frozen(self); + if(!NIL_P(dimensions) && NUM2INT(dimensions) < 0) + rb_raise( rb_eArgError, "dimensions must be nil or >= 0"); + this->dimensions = NIL_P(dimensions) ? -1 : NUM2INT(dimensions); + return dimensions; +} + +/* + * call-seq: + * coder.dimensions -> Integer | nil + * + * Get number of enforced array dimensions or +nil+ if not set. + * + * See #dimensions= + */ +static VALUE +pg_coder_dimensions_get(VALUE self) +{ + t_pg_composite_coder *this = RTYPEDDATA_DATA(self); + return this->dimensions < 0 ? Qnil : INT2NUM(this->dimensions); +} + /* * call-seq: * coder.elements_type = coder @@ -602,6 +647,8 @@ init_pg_coder(void) * * This is the base class for all type cast classes of PostgreSQL types, * that are made up of some sub type. + * + * See PG::TextEncoder::Array, PG::TextDecoder::Array, PG::BinaryEncoder::Array, PG::BinaryDecoder::Array, etc. 
*/ rb_cPG_CompositeCoder = rb_define_class_under( rb_mPG, "CompositeCoder", rb_cPG_Coder ); rb_define_method( rb_cPG_CompositeCoder, "elements_type=", pg_coder_elements_type_set, 1 ); @@ -610,6 +657,8 @@ init_pg_coder(void) rb_define_method( rb_cPG_CompositeCoder, "needs_quotation?", pg_coder_needs_quotation_get, 0 ); rb_define_method( rb_cPG_CompositeCoder, "delimiter=", pg_coder_delimiter_set, 1 ); rb_define_method( rb_cPG_CompositeCoder, "delimiter", pg_coder_delimiter_get, 0 ); + rb_define_method( rb_cPG_CompositeCoder, "dimensions=", pg_coder_dimensions_set, 1 ); + rb_define_method( rb_cPG_CompositeCoder, "dimensions", pg_coder_dimensions_get, 0 ); /* Document-class: PG::CompositeEncoder < PG::CompositeCoder */ rb_cPG_CompositeEncoder = rb_define_class_under( rb_mPG, "CompositeEncoder", rb_cPG_CompositeCoder ); diff --git a/ext/pg_text_encoder.c b/ext/pg_text_encoder.c index 0e0e7fbf1..d34bba627 100644 --- a/ext/pg_text_encoder.c +++ b/ext/pg_text_encoder.c @@ -537,7 +537,7 @@ quote_string(t_pg_coder *this, VALUE value, VALUE string, char *current_out, int } static char * -write_array(t_pg_composite_coder *this, VALUE value, char *current_out, VALUE string, int quote, int enc_idx) +write_array(t_pg_composite_coder *this, VALUE value, char *current_out, VALUE string, int quote, int enc_idx, int dimension) { int i; @@ -545,6 +545,10 @@ write_array(t_pg_composite_coder *this, VALUE value, char *current_out, VALUE st current_out = pg_rb_str_ensure_capa( string, 2, current_out, NULL ); *current_out++ = '{'; + if( RARRAY_LEN(value) == 0 && this->dimensions >= 0 && dimension != this->dimensions ){ + rb_raise(rb_eArgError, "less array dimensions to encode (%d) than expected (%d)", dimension, this->dimensions); + } + for( i=0; idimensions >= 0 && dimension != this->dimensions ){ + rb_raise(rb_eArgError, "less array dimensions to encode (%d) than expected (%d)", dimension, this->dimensions); + } current_out = pg_rb_str_ensure_capa( string, 4, current_out, NULL ); 
*current_out++ = 'N'; *current_out++ = 'U'; *current_out++ = 'L'; *current_out++ = 'L'; break; + case T_ARRAY: + if( this->dimensions < 0 || dimension < this->dimensions ){ + current_out = write_array(this, entry, current_out, string, quote, enc_idx, dimension+1); + break; + } + /* Number of dimensions reached -> handle array as normal value */ default: + if( this->dimensions >= 0 && dimension != this->dimensions ){ + rb_raise(rb_eArgError, "less array dimensions to encode (%d) than expected (%d)", dimension, this->dimensions); + } current_out = quote_string( this->elem, entry, string, current_out, quote, quote_array_buffer, this, enc_idx ); } } @@ -596,7 +609,7 @@ pg_text_enc_array(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate, VALUE out_str = rb_str_new(NULL, 0); PG_ENCODING_SET_NOCHECK(out_str, enc_idx); - end_ptr = write_array(this, value, RSTRING_PTR(out_str), out_str, this->needs_quotation, enc_idx); + end_ptr = write_array(this, value, RSTRING_PTR(out_str), out_str, this->needs_quotation, enc_idx, 1); rb_str_set_len( out_str, end_ptr - RSTRING_PTR(out_str) ); *intermediate = out_str; diff --git a/lib/pg/coder.rb b/lib/pg/coder.rb index e9d214fe9..7bf2c99e8 100644 --- a/lib/pg/coder.rb +++ b/lib/pg/coder.rb @@ -76,12 +76,13 @@ def to_h elements_type: elements_type, needs_quotation: needs_quotation?, delimiter: delimiter, + dimensions: dimensions, } end def inspect str = super - str[-1,0] = " elements_type=#{elements_type.inspect} #{needs_quotation? ? 'needs' : 'no'} quotation" + str[-1,0] = " elements_type=#{elements_type.inspect} #{needs_quotation? ? 
'needs' : 'no'} quotation#{dimensions && " #{dimensions} dimensions"}" str end end diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index 1393f7793..039cd6c02 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -956,6 +956,34 @@ def expect_deprecated_coder_init expect( binaryenc_text_array.encode([[[5,6]],[["6\"",7]],[[nil,5]]]) ).to eq( exp ) end + + let!(:binaryenc_array_array) { PG::BinaryEncoder::Array.new elements_type: PG::BinaryEncoder::Array.new(elements_type: PG::BinaryEncoder::Int4.new(oid: 0x17), dimensions: 1), dimensions: 2 } + + it 'encodes an array in an array of int4' do + exp = ["00000002" + "00000001" + "00000000" + + "00000003" + "00000001" + "00000001" + "00000001" + + + "00000024" + + "00000001" + "00000001" + "00000017" + + "00000002" + "00000001" + + "00000004" + "00000005" + + "00000004" + "00000006" + + + "00000024" + + "00000001" + "00000001" + "00000017" + + "00000002" + "00000001" + + "00000004" + "00000006" + + "00000004" + "00000007" + + + "00000020" + + "00000001" + "00000001" + "00000017" + + "00000002" + "00000001" + + "ffffffff" + + "00000004" + "00000005" + ].pack("H*") + + expect( binaryenc_array_array.encode([[[5,6]],[[6,7]],[[nil,5]]]) ).to eq( exp ) + end end context 'two dimensional arrays' do @@ -963,6 +991,40 @@ def expect_deprecated_coder_init expect( textenc_timestamp_array.encode([Time.new(2014,12,31),[nil, Time.new(2016,01,02, 23, 23, 59.99)]]) ). 
to eq( %[{2014-12-31 00:00:00.000000000,{NULL,2016-01-02 23:23:59.990000000}}] ) end + + context 'with dimensions' do + let!(:textenc_array_2dim) { textenc_string_array.dup.tap{|a| a.dimensions = 2} } + let!(:binaryenc_array_2dim) { binaryenc_array.dup.tap{|a| a.dimensions = 2} } + + it 'encodes binary int array' do + binaryenc_array_2dim.encode([[1]]) + end + it 'encodes text int array' do + expect( textenc_array_2dim.encode([[1]]) ).to eq( "{{1}}" ) + end + it 'encodes empty array' do + binaryenc_array_2dim.encode([[]]) + end + it 'encodes text empty array' do + expect( textenc_array_2dim.encode([[]]) ).to eq( "{{}}" ) + end + it 'raises an error on 1 dim binary array input to int4' do + expect{ binaryenc_array_2dim.encode([1]) }.to raise_error( ArgumentError, /less array dimensions.*1.*2/) + end + it 'raises an error on 1 dim text array input to int4' do + expect{ textenc_array_2dim.encode([1]) }.to raise_error( ArgumentError, /less array dimensions.*1.*2/) + end + + it 'raises an error on 0 dim array input to int4' do + expect{ binaryenc_array_2dim.encode([]) }.to raise_error( ArgumentError, /less array dimensions.*0.*2/) + end + it 'raises an error on 0 dim text array input to int4' do + expect{ textenc_array_2dim.encode([]) }.to raise_error( ArgumentError, /less array dimensions.*1.*2/) + end + it 'raises an error on 1 dim text array nil input' do + expect{ textenc_array_2dim.encode([nil]) }.to raise_error( ArgumentError, /less array dimensions.*1.*2/) + end + end end context 'one dimensional array' do @@ -986,6 +1048,37 @@ def expect_deprecated_coder_init expect( binaryenc_array.encode([nil, "6\""]) ).to eq( exp ) end + + context 'with dimensions' do + let!(:textenc_array_1dim) { textenc_int_array.dup.tap{|a| a.dimensions = 1} } + let!(:binaryenc_array_1dim) { binaryenc_array.dup.tap{|a| a.dimensions = 1} } + + it 'encodes an array' do + exp =["00000001" + "00000001" + "00000000" + + "00000002" + "00000001" + + "ffffffff" + + "00000002" + "3622" + 
].pack("H*") + + expect( binaryenc_array_1dim.encode([nil, "6\""]) ).to eq( exp ) + end + it 'encodes an empty binary array' do + exp =["00000000" + "00000001" + "00000000" + ].pack("H*") + expect( binaryenc_array_1dim.encode([]) ).to eq( exp ) + end + it 'encodes an empty text array' do + expect( textenc_array_1dim.encode([]) ).to eq( "{}" ) + end + + let!(:binaryenc_int4_array_1dim) { PG::BinaryEncoder::Array.new elements_type: PG::BinaryEncoder::Int4.new, dimensions: 1 } + it 'raises an error on binary array input to int4' do + expect{ binaryenc_int4_array_1dim.encode([[1]]) }.to raise_error( NoMethodError, /to_i/) + end + it 'raises an error on text array input to int4' do + expect{ textenc_array_1dim.encode([[1]]) }.to raise_error( NoMethodError, /to_i/) + end + end end context 'other dimensional array' do @@ -1091,7 +1184,8 @@ def expect_deprecated_coder_init it "should respond to to_h" do expect( textenc_int_array.to_h ).to eq( { name: nil, oid: 0, format: 0, flags: 0, - elements_type: textenc_int, needs_quotation: false, delimiter: ',' + elements_type: textenc_int, needs_quotation: false, delimiter: ',', + dimensions: nil } ) end @@ -1107,6 +1201,7 @@ def expect_deprecated_coder_init expect( t.needs_quotation? ).to eq( true ) expect( t.delimiter ).to eq( ',' ) expect( t.elements_type ).to be_nil + expect( t.dimensions ).to be_nil end it "should deny changes when frozen" do @@ -1117,6 +1212,7 @@ def expect_deprecated_coder_init expect{ t.needs_quotation = true }.to raise_error(FrozenError) expect{ t.delimiter = "," }.to raise_error(FrozenError) expect{ t.elements_type = nil }.to raise_error(FrozenError) + expect{ t.dimensions = 1 }.to raise_error(FrozenError) end it "should be shareable for Ractor", :ractor do From 1ec3e20bdee6ad3648e285c586b96fad9dbef0fb Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 10 Jan 2025 09:27:17 +0100 Subject: [PATCH 043/118] Verify array input to PG::TypeMapByColumn This could cause a segfault. 
Fixes #620 --- ext/pg_type_map_by_column.c | 1 + spec/pg/type_map_by_column_spec.rb | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/ext/pg_type_map_by_column.c b/ext/pg_type_map_by_column.c index 20e3a3893..61d621020 100644 --- a/ext/pg_type_map_by_column.c +++ b/ext/pg_type_map_by_column.c @@ -54,6 +54,7 @@ pg_tmbc_fit_to_query( VALUE self, VALUE params ) t_tmbc *this = RTYPEDDATA_DATA( self ); t_typemap *default_tm; + Check_Type(params, T_ARRAY); nfields = (int)RARRAY_LEN( params ); if ( this->nfields != nfields ) { rb_raise( rb_eArgError, "number of result fields (%d) does not match number of mapped columns (%d)", diff --git a/spec/pg/type_map_by_column_spec.rb b/spec/pg/type_map_by_column_spec.rb index b73f124a1..15bc696a6 100644 --- a/spec/pg/type_map_by_column_spec.rb +++ b/spec/pg/type_map_by_column_spec.rb @@ -182,6 +182,12 @@ def decode(res, tuple, field) expect{ PG::TypeMapByColumn.new( [123] ) }.to raise_error(TypeError, /wrong argument type (Integer|Fixnum)/) end + it "should raise an error for invalid input when used as type_map" do + map = PG::TypeMapByColumn.new([PG::TextEncoder::Integer.new]) + record_encoder = PG::TextEncoder::Record.new(type_map: map) + expect{ record_encoder.encode(123) }.to raise_error(TypeError) + end + it "shouldn't allow result mappings with different number of fields" do res = @conn.exec( "SELECT 1" ) expect{ res.type_map = PG::TypeMapByColumn.new([]) }.to raise_error(ArgumentError, /mapped columns/) From 15be07dcb18975d0ff9943201dee5d135dba5b27 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 10 Jan 2025 13:55:46 +0100 Subject: [PATCH 044/118] Update to rcd-1.8.0 and add ruby-3.4 in native gem --- Gemfile | 2 +- Rakefile | 9 ++++----- ext/extconf.rb | 7 +++++++ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/Gemfile b/Gemfile index 81dfc52cc..8395dc1ac 100644 --- a/Gemfile +++ b/Gemfile @@ -13,7 +13,7 @@ end group :test do gem "bundler", ">= 1.16", "< 3.0" gem "rake-compiler", "~> 1.0" - gem 
"rake-compiler-dock", "~> 1.5" + gem "rake-compiler-dock", "~> 1.8.0" gem "rspec", "~> 3.5" # "bigdecimal" is a gem on ruby-3.4+ and it's optional for ruby-pg. # Specs should succeed without it, but 4 examples are then excluded. diff --git a/Rakefile b/Rakefile index 56ab281d5..6c663d2cc 100644 --- a/Rakefile +++ b/Rakefile @@ -47,7 +47,7 @@ CrossLibraries = [ ['x64-mingw-ucrt', 'mingw64', 'x86_64-w64-mingw32'], ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], - ['x86_64-linux', 'linux-x86_64', 'x86_64-redhat-linux-gnu'], + ['x86_64-linux', 'linux-x86_64', 'x86_64-linux-gnu'], ].map do |platform, openssl_config, toolchain| CrossLibrary.new platform, openssl_config, toolchain end @@ -100,13 +100,12 @@ CrossLibraries.each do |xlib| desc "Build fat binary gem for platform #{platform}" task "gem:native:#{platform}" => ['gem:native:prepare'] do RakeCompilerDock.sh <<-EOT, platform: platform - #{ "sudo yum install -y perl-IPC-Cmd bison flex &&" if platform =~ /linux/ } #{ # remove nm on Linux to suppress PostgreSQL's check for exit which raises thread_exit as a false positive: - "sudo mv `which nm` `which nm`.bak && sudo mv `which nm` `which nm`.bak &&" if platform =~ /linux/ } - #{ "sudo apt-get update && sudo apt-get install -y bison flex &&" if platform =~ /mingw/ } + "sudo mv `which nm` `which nm`.bak &&" if platform =~ /linux/ } + sudo apt-get update && sudo apt-get install -y bison flex && (cp build/gem/gem-*.pem ~/.gem/ || true) && bundle install --local && - rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.3.0:3.2.0:3.1.0:3.0.0:2.7.0 + rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.4.1:3.3.5:3.2.6:3.1.6:3.0.7:2.7.8 EOT end desc "Build the native binary gems" diff --git a/ext/extconf.rb b/ext/extconf.rb index ed81b5123..dbe7c58a7 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -96,6 +96,9 @@ def 
work_path # See https://github.com/cockroachdb/cockroach/issues/49734 recipe.configure_options << "CFLAGS=-fcommon#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" recipe.configure_options << "--without-keyutils" + recipe.configure_options << "krb5_cv_attr_constructor_destructor=yes" + recipe.configure_options << "ac_cv_func_regcomp=yes" + recipe.configure_options << "ac_cv_printf_positional=yes" recipe.host = toolchain recipe.cook_and_activate end @@ -111,6 +114,8 @@ def configure_defaults *(RUBY_PLATFORM=~/linux/ ? ['--with-gssapi'] : []), '--without-zlib', '--without-icu', + '--without-readline', + 'ac_cv_search_gss_store_cred_into=', ] end def compile @@ -135,6 +140,8 @@ def install File.join(postgresql_recipe.port_path, "lib/libpq-ruby-pg.so.1") # Avoid dependency to external libgcc.dll on x86-mingw32 $LDFLAGS << " -static-libgcc" + # Avoid: "libpq.so: undefined reference to `dlopen'" in cross-ruby-2.7.8 + $LDFLAGS << " -Wl,--no-as-needed" # Find libpq in the ports directory coming from lib/3.3 # It is shared between all compiled ruby versions. 
$LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" From 9afa148b13fa5294119af17bbeb15a85b7eaf07d Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 10 Jan 2025 14:25:35 +0100 Subject: [PATCH 045/118] CI: run binary jobs only on master and PRs --- .github/workflows/binary-gems.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 4779569d5..b07cff905 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -1,11 +1,20 @@ name: Binary gems on: - push: - pull_request: workflow_dispatch: schedule: - cron: "0 5 * * 3" # At 05:00 on Wednesday # https://crontab.guru/#0_5_*_*_3 + push: + branches: + - master + tags: + - "*.*.*" + pull_request: + types: [opened, synchronize] + branches: + - "*" +permissions: + contents: read jobs: rcd_build: From 3ac52bb3d6dfa29405295d0d0b1f3f0012510e6b Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 14 Dec 2024 22:29:22 +0100 Subject: [PATCH 046/118] Add MINGW package dependency which is resolved by RubyInstaller MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no package which contains libpq only, but the postgresql package now has fewer dependencies than it had in the past. 
Especially python and perl are optional dependencies now, so that the install size is acceptable: Pakete (5) mingw-w64-clang-aarch64-icu-75.1-2 mingw-w64-clang-aarch64-lz4-1.10.0-1 mingw-w64-clang-aarch64-openssl-3.4.0-1 winpty-0.4.3-3 mingw-w64-clang-aarch64-postgresql-17.2-1 Gesamtgröße des Downloads: 40,09 MiB Gesamtgröße der installierten Pakete: 198,79 MiB --- .appveyor.yml | 2 -- pg.gemspec | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 30987e2e7..e463100f1 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -24,8 +24,6 @@ install: $env:PATH = 'C:/Program Files/PostgreSQL/' + $env:PGVER + '/bin;' + $env:PATH $env:PATH = 'C:/Program Files (x86)/PostgreSQL/' + $env:PGVER + '/bin;' + $env:PATH - } else { - c:/msys64/usr/bin/bash -lc "pacman -S --noconfirm --needed `${MINGW_PACKAGE_PREFIX}-postgresql" } - echo %PATH% - pg_config diff --git a/pg.gemspec b/pg.gemspec index 310dc354a..7d9c24844 100644 --- a/pg.gemspec +++ b/pg.gemspec @@ -19,6 +19,8 @@ Gem::Specification.new do |spec| spec.metadata["source_code_uri"] = "https://github.com/ged/ruby-pg" spec.metadata["changelog_uri"] = "https://github.com/ged/ruby-pg/blob/master/History.md" spec.metadata["documentation_uri"] = "http://deveiate.org/code/pg" + # https://github.com/oneclick/rubyinstaller2/wiki/For-gem-developers#msys2-library-dependency + spec.metadata["msys2_mingw_dependencies"] = "postgresql" # Specify which files should be added to the gem when it is released. # The `git ls-files -z` loads the files in the RubyGem that have been added into git. 
From 1e6ffe7ea5162c627b2b36597e788ea3c047483d Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 31 Jan 2025 22:15:03 +0100 Subject: [PATCH 047/118] Add cross compiled binary gem for aarch64-mingw-ucrt --- Rakefile | 1 + ext/extconf.rb | 6 +-- .../openssl/3.4.0/0001-aarch64-mingw.patch | 21 ++++++++++ ...f-__builtin_setjmp-only-on-MINGW-on-.patch | 42 +++++++++++++++++++ 4 files changed, 67 insertions(+), 3 deletions(-) create mode 100644 ports/patches/openssl/3.4.0/0001-aarch64-mingw.patch create mode 100644 ports/patches/postgresql/17.2/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch diff --git a/Rakefile b/Rakefile index 6c663d2cc..d2b16d919 100644 --- a/Rakefile +++ b/Rakefile @@ -44,6 +44,7 @@ end CrossLibrary = Struct.new :platform, :openssl_config, :toolchain CrossLibraries = [ + ['aarch64-mingw-ucrt', 'mingwarm64', 'aarch64-w64-mingw32'], ['x64-mingw-ucrt', 'mingw64', 'x86_64-w64-mingw32'], ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], diff --git a/ext/extconf.rb b/ext/extconf.rb index dbe7c58a7..086481071 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -126,12 +126,12 @@ def install end end + recipe.host = toolchain recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" - recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 #{"-Wl,-soname,libpq-ruby-pg.so.1 -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,libpq-ruby-pg.so.1 -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/}" recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ recipe.configure_options 
<< "CPPFLAGS=-I#{openssl_recipe.path}/include" - recipe.host = toolchain recipe.cook_and_activate end @@ -141,7 +141,7 @@ def install # Avoid dependency to external libgcc.dll on x86-mingw32 $LDFLAGS << " -static-libgcc" # Avoid: "libpq.so: undefined reference to `dlopen'" in cross-ruby-2.7.8 - $LDFLAGS << " -Wl,--no-as-needed" + $LDFLAGS << " -Wl,--no-as-needed" if RUBY_PLATFORM !~ /aarch64/ # Find libpq in the ports directory coming from lib/3.3 # It is shared between all compiled ruby versions. $LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" diff --git a/ports/patches/openssl/3.4.0/0001-aarch64-mingw.patch b/ports/patches/openssl/3.4.0/0001-aarch64-mingw.patch new file mode 100644 index 000000000..0cd894c86 --- /dev/null +++ b/ports/patches/openssl/3.4.0/0001-aarch64-mingw.patch @@ -0,0 +1,21 @@ +--- a/Configurations/10-main.conf ++++ b/Configurations/10-main.conf +@@ -1603,6 +1603,18 @@ + multilib => "64", + }, + ++ "mingwarm64" => { ++ inherit_from => [ "mingw-common" ], ++ cflags => "", ++ sys_id => "MINGWARM64", ++ bn_ops => add("SIXTY_FOUR_BIT"), ++ asm_arch => 'aarch64', ++ uplink_arch => 'armv8', ++ perlasm_scheme => "win64", ++ shared_rcflag => "", ++ multilib => "-arm64", ++ }, ++ + #### UEFI + "UEFI" => { + inherit_from => [ "BASE_unix" ], diff --git a/ports/patches/postgresql/17.2/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch b/ports/patches/postgresql/17.2/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch new file mode 100644 index 000000000..ebbd56121 --- /dev/null +++ b/ports/patches/postgresql/17.2/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch @@ -0,0 +1,42 @@ +From 746e8e250b265c40d9706f26560e02e8623f123f Mon Sep 17 00:00:00 2001 +From: Lars Kanis +Date: Fri, 31 Jan 2025 21:58:00 +0100 +Subject: [PATCH] Use workaround of __builtin_setjmp only on MINGW on MSVCRT + +Because it is not present on ARM64 on Windows and not necessary on any UCRT based toolchain. 
+--- + src/include/c.h | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/src/include/c.h b/src/include/c.h +index a14c631516..33792c860c 100644 +--- a/src/include/c.h ++++ b/src/include/c.h +@@ -1312,19 +1312,19 @@ extern int fdatasync(int fildes); + /* + * When there is no sigsetjmp, its functionality is provided by plain + * setjmp. We now support the case only on Windows. However, it seems +- * that MinGW-64 has some longstanding issues in its setjmp support, +- * so on that toolchain we cheat and use gcc's builtins. ++ * that MinGW-64 on x86_64 has some longstanding issues in its setjmp ++ * support, so on that toolchain we cheat and use gcc's builtins. + */ + #ifdef WIN32 +-#ifdef __MINGW64__ ++#if defined(__MINGW64__) && !defined(_UCRT) + typedef intptr_t sigjmp_buf[5]; + #define sigsetjmp(x,y) __builtin_setjmp(x) + #define siglongjmp __builtin_longjmp +-#else /* !__MINGW64__ */ ++#else /* !defined(__MINGW64__) || defined(_UCRT) */ + #define sigjmp_buf jmp_buf + #define sigsetjmp(x,y) setjmp(x) + #define siglongjmp longjmp +-#endif /* __MINGW64__ */ ++#endif /* defined(__MINGW64__) && !defined(_UCRT) */ + #endif /* WIN32 */ + + /* /port compatibility functions */ +-- +2.43.0 + From dd138ecf313274cc6a5481071bf301dc8c5139d8 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 28 Feb 2025 10:39:07 +0100 Subject: [PATCH 048/118] =?UTF-8?q?Use=20a=20connection=20encoding=20compa?= =?UTF-8?q?tible=20to=20"M=C3=B6hre"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function PQescapeString was changed in PostgreSQL 17.4, 16.8, 15.12, 14.17, and 13.20. It now returns a predefined invalid character as a replacement in the connection encoding, if the input text is not valid according to the current connection encoding. Using a compatible connection encoding avoids this, so that we get the original text out of the singleton escape function. 
Fixes #628 --- spec/pg/connection_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 63d3585ad..b64604467 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -2325,7 +2325,7 @@ def wait_check_socket(conn) it "uses the previous string encoding for escaped string" do original = "Möhre to 'scape".encode( "iso-8859-1" ) - @conn.set_client_encoding( "euc_jp" ) + @conn.set_client_encoding( "iso-8859-2" ) escaped = described_class.escape( original ) expect( escaped.encoding ).to eq( Encoding::ISO8859_1 ) expect( escaped ).to eq( "Möhre to ''scape".encode(Encoding::ISO8859_1) ) From 938c0d17ae1ee86b33cd8c34f7b6dc7bc7ea34a6 Mon Sep 17 00:00:00 2001 From: Jean Boussier Date: Sat, 8 Mar 2025 10:27:24 +0100 Subject: [PATCH 049/118] Change #server_version to raise instead of return 0 on error Ref: https://github.com/rails/rails/issues/54712 Returning 0 on error is a bit uncommon in Ruby, and not very convenient, I think it would make more sense to raise the appropriate exception. --- ext/pg_connection.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 6bca4de19..5d99ecf0f 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -862,14 +862,18 @@ pgconn_protocol_version(VALUE self) * The number is formed by converting the major, minor, and revision * numbers into two-decimal-digit numbers and appending them together. * For example, version 7.4.2 will be returned as 70402, and version - * 8.1 will be returned as 80100 (leading zeroes are not shown). Zero - * is returned if the connection is bad. + * 8.1 will be returned as 80100 (leading zeroes are not shown). * + * PG::ConnectionBad is raised if the connection is bad. 
*/ static VALUE pgconn_server_version(VALUE self) { - return INT2NUM(PQserverVersion(pg_get_pgconn(self))); + int server_version = PQserverVersion(pg_get_pgconn(self)); + if (server_version == 0) { + pg_raise_conn_error( rb_eConnectionBad, self, "PQserverVersion() can't get server version"); + } + return INT2NUM(server_version); } /* From 8ffc51e10e19a46474ade514cb1749292a2f1695 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 9 Mar 2025 11:43:07 +0100 Subject: [PATCH 050/118] Add a spec for conn.server_version with success and failure case. --- spec/pg/connection_spec.rb | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index b64604467..5dce96250 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -912,6 +912,18 @@ end end + context :server_version do + it "should retrieve the server version" do + expect( @conn.server_version ).to be >= 100000 + end + + it "should raise an error on a bad connection" do + conn = PG::Connection.connect_start( @conninfo ) + expect{ conn.server_version }.to raise_error(PG::ConnectionBad) + conn.finish + end + end + it "allows a query to be cancelled" do start = Time.now @conn.set_notice_processor do |notice| From fd2276256e71a05d4cbc195379360232276d5d23 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 9 Mar 2025 16:53:07 +0100 Subject: [PATCH 051/118] Update the documentation of conn.server_version --- ext/pg_connection.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index 5d99ecf0f..cf34d4208 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -859,12 +859,20 @@ pgconn_protocol_version(VALUE self) * call-seq: * conn.server_version -> Integer * - * The number is formed by converting the major, minor, and revision - * numbers into two-decimal-digit numbers and appending them together. 
- * For example, version 7.4.2 will be returned as 70402, and version - * 8.1 will be returned as 80100 (leading zeroes are not shown). + * Returns an integer representing the server version. + * + * Applications might use this function to determine the version of the database server they are connected to. + * The result is formed by multiplying the server's major version number by 10000 and adding the minor version number. + * For example, version 10.1 will be returned as 100001, and version 11.0 will be returned as 110000. * * PG::ConnectionBad is raised if the connection is bad. + * + * Prior to major version 10, PostgreSQL used three-part version numbers in which the first two parts together represented the major version. + * For those versions, PQserverVersion uses two digits for each part; for example version 9.1.5 will be returned as 90105, and version 9.2.0 will be returned as 90200. + * + * Therefore, for purposes of determining feature compatibility, applications should divide the result of PQserverVersion by 100 not 10000 to determine a logical major version number. + * In all release series, only the last two digits differ between minor releases (bug-fix releases). + * */ static VALUE pgconn_server_version(VALUE self) From b74a7c68abe13d584b59117b11191e98e5f4d211 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 9 Mar 2025 16:59:43 +0100 Subject: [PATCH 052/118] Change conn.protocol_version to raise an error on failure ... the same way as server_version. 
--- ext/pg_connection.c | 17 +++++++++++++---- spec/pg/connection_spec.rb | 12 ++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/ext/pg_connection.c b/ext/pg_connection.c index cf34d4208..754f45fcc 100644 --- a/ext/pg_connection.c +++ b/ext/pg_connection.c @@ -845,14 +845,23 @@ pgconn_parameter_status(VALUE self, VALUE param_name) * call-seq: * conn.protocol_version -> Integer * - * The 3.0 protocol will normally be used when communicating with PostgreSQL 7.4 - * or later servers; pre-7.4 servers support only protocol 2.0. (Protocol 1.0 is - * obsolete and not supported by libpq.) + * Interrogates the frontend/backend protocol being used. + * + * Applications might wish to use this function to determine whether certain features are supported. + * Currently, the only value is 3 (3.0 protocol). + * The protocol version will not change after connection startup is complete, but it could theoretically change during a connection reset. + * The 3.0 protocol is supported by PostgreSQL server versions 7.4 and above. + * + * PG::ConnectionBad is raised if the connection is bad. 
*/ static VALUE pgconn_protocol_version(VALUE self) { - return INT2NUM(PQprotocolVersion(pg_get_pgconn(self))); + int protocol_version = PQprotocolVersion(pg_get_pgconn(self)); + if (protocol_version == 0) { + pg_raise_conn_error( rb_eConnectionBad, self, "PQprotocolVersion() can't get protocol version"); + } + return INT2NUM(protocol_version); } /* diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 5dce96250..2c60c532e 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -924,6 +924,18 @@ end end + context :protocol_version do + it "should retrieve the wrie protocol version" do + expect( @conn.protocol_version ).to eq 3 + end + + it "should raise an error on a bad connection" do + conn = PG::Connection.connect_start( @conninfo ) + conn.finish + expect{ conn.protocol_version }.to raise_error(PG::ConnectionBad) + end + end + it "allows a query to be cancelled" do start = Time.now @conn.set_notice_processor do |notice| From 8599591966f59a333af7046dda7c722d2ff32a8d Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 9 Mar 2025 17:53:56 +0100 Subject: [PATCH 053/118] Avoid usage of port 54321 for tests but use 23456 54321 is an ephemeral port on most operating systems, so that is's sometimes already in use in CI. This makes initdb fail sometimes on Windows like so: ``` Success. 
You can now start the database server using: "D:\a\ruby-pg\ruby-pg\pgsql\bin\pg_ctl" -D D:/a/ruby-pg/ruby-pg/tmp_test_specs/data -l logfile start waiting for server to start....2025-03-09 16:03:40.843 UTC [4624] LOG: starting PostgreSQL 17.0 on x86_64-windows, compiled by msvc-19.41.34120, 64-bit 2025-03-09 16:03:40.846 UTC [4624] LOG: could not bind IPv6 address "::1": Permission denied 2025-03-09 16:03:40.846 UTC [4624] LOG: could not bind IPv4 address "127.0.0.1": Permission denied 2025-03-09 16:03:40.846 UTC [4624] WARNING: could not create listen socket for "localhost" 2025-03-09 16:03:40.846 UTC [4624] FATAL: could not create any TCP/IP sockets 2025-03-09 16:03:40.847 UTC [4624] LOG: database system is shut down ``` --- spec/helpers.rb | 4 ++-- spec/pg/connection_spec.rb | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/spec/helpers.rb b/spec/helpers.rb index 7214ec10a..7ffee2b6c 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -250,7 +250,7 @@ class PostgresServer attr_reader :pgdata ### Set up a PostgreSQL database instance for testing. 
- def initialize( name, port: 54321, postgresql_conf: '' ) + def initialize( name, port: 23456, postgresql_conf: '' ) trace "Setting up test database for #{name}" @name = name @port = port @@ -719,7 +719,7 @@ def set_etc_hosts(hostaddr, hostname) PG::TestingHelpers.stop_existing_postmasters ENV['PGHOST'] = 'localhost' - ENV['PGPORT'] ||= "54321" + ENV['PGPORT'] ||= "23456" port = ENV['PGPORT'].to_i $pg_server = PG::TestingHelpers::PostgresServer.new("specs", port: port) $pg_server.create_test_db diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 2c60c532e..06de454c2 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -1527,7 +1527,7 @@ it "can return the default connection options as a Hash" do expect( described_class.conndefaults_hash ).to be_a( Hash ) expect( described_class.conndefaults_hash ).to include( :user, :password, :dbname, :host, :port ) - expect( ['5432', '54321', @port.to_s] ).to include( described_class.conndefaults_hash[:port] ) + expect( ['5432', '23456', @port.to_s] ).to include( described_class.conndefaults_hash[:port] ) expect( @conn.conndefaults_hash ).to eq( described_class.conndefaults_hash ) end From 73f05c9e3992d625be00d47ad63633861a041a56 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 8 Apr 2025 13:22:22 +0200 Subject: [PATCH 054/118] Fix making PG::BasicTypeMapForQueries shareable for Ractor This was broken since ruby commit https://github.com/ruby/ruby/commit/d80f3a287c5c8d0404b6cb837db360cab320cde1 It shrinked shareability of Proc objects, so that we need to avoid a Proc here. 
--- lib/pg/basic_type_map_for_queries.rb | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/pg/basic_type_map_for_queries.rb b/lib/pg/basic_type_map_for_queries.rb index 525d6cf2d..3b0d492d9 100644 --- a/lib/pg/basic_type_map_for_queries.rb +++ b/lib/pg/basic_type_map_for_queries.rb @@ -53,14 +53,18 @@ def initialize(connection_or_coder_maps, registry: nil, if_undefined: nil) @coder_maps = build_coder_maps(connection_or_coder_maps, registry: registry) @array_encoders_by_klass = array_encoders_by_klass @encode_array_as = :array - @if_undefined = if_undefined || method(:raise_undefined_type).to_proc + @if_undefined = if_undefined || UndefinedDefault init_encoders end - private def raise_undefined_type(oid_name, format) - raise UndefinedEncoder, "no encoder defined for type #{oid_name.inspect} format #{format}" + class UndefinedDefault + def self.call(oid_name, format) + raise UndefinedEncoder, "no encoder defined for type #{oid_name.inspect} format #{format}" + end end + private_constant :UndefinedDefault + # Change the mechanism that is used to encode ruby array values # # Possible values: From 55c6e92d66a396b134d90301313c58556905a6b4 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 6 Jun 2025 18:44:47 +0200 Subject: [PATCH 055/118] CI: Update Ubuntu version, which is no longer supported --- .github/workflows/source-gem.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index bc29b5e6f..dc42da20d 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -61,7 +61,7 @@ jobs: ruby: "3.2" PGVER: "12" - os: ubuntu - os_ver: "20.04" + os_ver: "22.04" ruby: "2.7" PGVER: "10" - os: ubuntu From ca83c7c4853ac538d1f58a833ee1dc4e00ea5a75 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 6 Jun 2025 18:51:52 +0200 Subject: [PATCH 056/118] Use Ractor#value instead of deprecated #take Ractor#take was removed from ruby-3.5 since 
https://github.com/ruby/ruby/pull/13445 --- spec/helpers.rb | 7 +++++++ spec/pg/basic_type_map_based_on_result_spec.rb | 2 +- spec/pg/basic_type_map_for_queries_spec.rb | 2 +- spec/pg/basic_type_map_for_results_spec.rb | 4 ++-- spec/pg/connection_spec.rb | 4 ++-- spec/pg/exceptions_spec.rb | 2 +- spec/pg/result_spec.rb | 2 +- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/spec/helpers.rb b/spec/helpers.rb index 7ffee2b6c..3bab430c2 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -15,6 +15,13 @@ TEST_DIRECTORY = Pathname.new(TEST_DIR_STR) DATA_OBJ_MEMSIZE = ObjectSpace.memsize_of(Object.new) +if defined? Ractor + class Ractor + # compat with Ruby-3.4 and older + alias value take unless method_defined? :value + end +end + module PG::TestingHelpers ### Automatically wrap a transaction around examples that don't disable it. diff --git a/spec/pg/basic_type_map_based_on_result_spec.rb b/spec/pg/basic_type_map_based_on_result_spec.rb index b973dca51..0d0666f24 100644 --- a/spec/pg/basic_type_map_based_on_result_spec.rb +++ b/spec/pg/basic_type_map_based_on_result_spec.rb @@ -49,7 +49,7 @@ res.values ensure conn&.finish - end.take + end.value expect( vals ).to eq( [['b', '234', '{2,3}']] ) end diff --git a/spec/pg/basic_type_map_for_queries_spec.rb b/spec/pg/basic_type_map_for_queries_spec.rb index 660202086..143afbf47 100644 --- a/spec/pg/basic_type_map_for_queries_spec.rb +++ b/spec/pg/basic_type_map_for_queries_spec.rb @@ -30,7 +30,7 @@ res.values ensure conn&.finish - end.take + end.value expect( vals ).to eq( [[ "2019-12-08 23:38:12.123", "t" ]] ) end diff --git a/spec/pg/basic_type_map_for_results_spec.rb b/spec/pg/basic_type_map_for_results_spec.rb index 127481ebf..d3176c43c 100644 --- a/spec/pg/basic_type_map_for_results_spec.rb +++ b/spec/pg/basic_type_map_for_results_spec.rb @@ -37,7 +37,7 @@ res.map_types!(btm).values ensure conn&.finish - end.take + end.value expect( vals ).to eq( [ [ 1, 'a', 2.0, true, Date.new(2013,6,30) ], @@ -52,7 
+52,7 @@ res.map_types!(btm).values ensure conn&.finish - end.take + end.value expect( vals ).to eq( [ [ 1, 'a', 2.0, true, Date.new(2013,6,30) ], diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 06de454c2..f98996e87 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -42,7 +42,7 @@ conn.exec("SELECT 123").values ensure conn&.finish - end.take + end.value expect( vals ).to eq( [["123"]] ) end @@ -52,7 +52,7 @@ PG.connect( 'localhost', @port, nil, nil, :test, nil, nil ) do |conn| conn.exec("SELECT 234").values end - end.take + end.value expect( vals ).to eq( [["234"]] ) end diff --git a/spec/pg/exceptions_spec.rb b/spec/pg/exceptions_spec.rb index 413b1085c..189cdd811 100644 --- a/spec/pg/exceptions_spec.rb +++ b/spec/pg/exceptions_spec.rb @@ -35,7 +35,7 @@ end begin - r.take + r.value rescue Exception => err end diff --git a/spec/pg/result_spec.rb b/spec/pg/result_spec.rb index 6fc460bc7..2f1a17fc1 100644 --- a/spec/pg/result_spec.rb +++ b/spec/pg/result_spec.rb @@ -38,7 +38,7 @@ conn.exec("SELECT 123 as col") ensure conn&.finish - end.take + end.value expect( res ).to be_kind_of( PG::Result ) expect( res.fields ).to eq( ["col"] ) From 07ce11b59cd5dd1c759772a1942c4cc42eff8c97 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 6 Jun 2025 21:41:05 +0200 Subject: [PATCH 057/118] CI: Fix binary gem test on Alpine --- spec/env/Dockerfile.alpine | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/env/Dockerfile.alpine b/spec/env/Dockerfile.alpine index f1268d491..576cf7528 100644 --- a/spec/env/Dockerfile.alpine +++ b/spec/env/Dockerfile.alpine @@ -2,7 +2,7 @@ ARG from_image FROM ${from_image} RUN uname -a -RUN apk add ruby ruby-etc ruby-rake ruby-dev git gcc make musl-dev gcompat postgresql16 sudo +RUN apk add ruby ruby-rake ruby-dev git gcc make musl-dev gcompat postgresql16 sudo RUN git config --global --add safe.directory /build RUN ruby --version From 0a3d10fcc696bc8de7cae793ecf0791b8c578a84 Mon 
Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 7 Jul 2025 08:25:01 +0200 Subject: [PATCH 058/118] Rename History.md to CHANGELOG.md The name "History" is for historical reasons, but CHANGELOG seems to be the better name. Fixes #642 --- History.md => CHANGELOG.md | 0 README.ja.md | 2 +- README.md | 2 +- pg.gemspec | 2 +- translation/po/all.pot | 2 +- translation/po/ja.po | 4 ++-- 6 files changed, 6 insertions(+), 6 deletions(-) rename History.md => CHANGELOG.md (100%) diff --git a/History.md b/CHANGELOG.md similarity index 100% rename from History.md rename to CHANGELOG.md diff --git a/README.ja.md b/README.ja.md index 95ab6a79a..b18904076 100644 --- a/README.ja.md +++ b/README.ja.md @@ -2,7 +2,7 @@ * ホーム :: https://github.com/ged/ruby-pg * ドキュメント :: http://deveiate.org/code/pg (英語)、 https://deveiate.org/code/pg/README_ja_md.html (日本語) -* 変更履歴 :: link:/History.md +* 変更履歴 :: link:/CHANGELOG.md [![https://gitter.im/ged/ruby-pg でチャットに参加](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ged/ruby-pg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) diff --git a/README.md b/README.md index 1821f1368..4869922fa 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ * home :: https://github.com/ged/ruby-pg * docs :: http://deveiate.org/code/pg (English) , https://deveiate.org/code/pg/README_ja_md.html (Japanese) -* clog :: link:/History.md +* clog :: link:/CHANGELOG.md [![Join the chat at https://gitter.im/ged/ruby-pg](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ged/ruby-pg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) diff --git a/pg.gemspec b/pg.gemspec index 7d9c24844..8419e4f76 100644 --- a/pg.gemspec +++ b/pg.gemspec @@ -17,7 +17,7 @@ Gem::Specification.new do |spec| spec.metadata["homepage_uri"] = spec.homepage spec.metadata["source_code_uri"] = "https://github.com/ged/ruby-pg" - spec.metadata["changelog_uri"] = "https://github.com/ged/ruby-pg/blob/master/History.md" + 
spec.metadata["changelog_uri"] = "https://github.com/ged/ruby-pg/blob/master/CHANGELOG.md" spec.metadata["documentation_uri"] = "http://deveiate.org/code/pg" # https://github.com/oneclick/rubyinstaller2/wiki/For-gem-developers#msys2-library-dependency spec.metadata["msys2_mingw_dependencies"] = "postgresql" diff --git a/translation/po/all.pot b/translation/po/all.pot index 5a21ff434..9a0d88328 100644 --- a/translation/po/all.pot +++ b/translation/po/all.pot @@ -34,7 +34,7 @@ msgstr "" msgid "" "* docs :: http://deveiate.org/code/pg (English) ,\n" " https://deveiate.org/code/pg/README_ja_md.html (Japanese)\n" -"* clog :: link:/History.md\n" +"* clog :: link:/CHANGELOG.md\n" msgstr "" #. type: Plain text diff --git a/translation/po/ja.po b/translation/po/ja.po index f67da8a75..c4b0d51aa 100644 --- a/translation/po/ja.po +++ b/translation/po/ja.po @@ -39,10 +39,10 @@ msgstr "ホーム :: https://github.com/ged/ruby-pg" msgid "" "* docs :: http://deveiate.org/code/pg (English) ,\n" " https://deveiate.org/code/pg/README_ja_md.html (Japanese)\n" -"* clog :: link:/History.md\n" +"* clog :: link:/CHANGELOG.md\n" msgstr "" "* ドキュメント :: http://deveiate.org/code/pg (英語)、 https://deveiate.org/code/pg/README_ja_md.html (日本語)\n" -"* 変更履歴 :: link:/History.md\n" +"* 変更履歴 :: link:/CHANGELOG.md\n" #. 
type: Plain text #: ../README.md:9 From 1a332f3ccfdd6a3bb2210952bd958f45f3c39342 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 7 Jul 2025 08:34:24 +0200 Subject: [PATCH 059/118] Remove now unused files --- .gems | 6 ----- .gemtest | 0 .hgsigs | 34 ------------------------- .hgtags | 41 ------------------------------ .travis.yml | 49 ----------------------------------- Manifest.txt | 72 ---------------------------------------------------- 6 files changed, 202 deletions(-) delete mode 100644 .gems delete mode 100644 .gemtest delete mode 100644 .hgsigs delete mode 100644 .hgtags delete mode 100644 .travis.yml delete mode 100644 Manifest.txt diff --git a/.gems b/.gems deleted file mode 100644 index c51daab89..000000000 --- a/.gems +++ /dev/null @@ -1,6 +0,0 @@ -# .gems generated gem export file. Note that any env variable settings will be missing. Append these after using a ';' field separator -rake-compiler -v1.1.0 -rake-compiler-dock -v1.0.0 -hoe-deveiate -v0.10.0 -hoe-bundler -v1.3.0 - diff --git a/.gemtest b/.gemtest deleted file mode 100644 index e69de29bb..000000000 diff --git a/.hgsigs b/.hgsigs deleted file mode 100644 index 613119f58..000000000 --- a/.hgsigs +++ /dev/null @@ -1,34 +0,0 @@ -872063e42b129af10539f73b3c083ad8a031f961 0 iEYEABECAAYFAkuKoCoACgkQ+zlz4UKpE6QzewCgrFcSsAwju/KpZ8myuWexlcSbe04AmwWCbf4HM95tDXdFvsvzeegPg8AS -3993015a841e43c9cd9d1321819cbf5e74264f1d 0 iEYEABECAAYFAkz2ycMACgkQ+zlz4UKpE6SYjQCgi/1Ik2rntK2dU93Hb91wYh0Yv4sAoKxEXVuXaEIAiwB4vSQ/7JQGIBzM -230ea3e68db2360548097542c4856dec4c3cd97a 0 iEYEABECAAYFAk03CpAACgkQ+zlz4UKpE6SPAgCfbRwKmAgHTmrudSoC09c37Tuyff0AnRHrSaqKhiCO7KlX5UJq6x0ttoKH -24aa7899c6966ce349c8e4f2a87b17c3e943ff56 0 iEYEABECAAYFAk2s1wQACgkQ+zlz4UKpE6SkLQCdHOS5yxoUFguEo885HkDyOZg4Y7wAoMVofhwOUHVQ6djXr0hgAmahI1lW -19b551f972e27dcfa281b92914e2a98661243206 0 iEYEABECAAYFAk7f51sACgkQ+zlz4UKpE6RkYACg0WZjt1crbi72DQYs3kYKSYRflNYAnA80+VVwmMUQiWuFuQ+7gbiUPCyY -f72b14d349bf385c769aacfddbea7a0e60ff5e9e 0 
iEYEABECAAYFAk8CFCIACgkQ+zlz4UKpE6QbYACgyLQwHPQH50sGVgzTD3y13XKwi38AoIrF5zSOiMXAeL+sk++iwDYV4ddW -f3dfdb6929b70ddd3bb952757bdfb199e6916245 0 iEYEABECAAYFAk8Di+MACgkQ+zlz4UKpE6TVvwCg+ibuW22lRdnOIrRF2V4am7b4YxYAn0bDEnP93JX6qKAaU8kcoCrTKDXp -b67309d3ccf2f9de56535e01f58c7af994426827 0 iEYEABECAAYFAk8iJKkACgkQ+zlz4UKpE6SjUQCgpItY5hW5NyVkfL5+nkRhJqaetQMAoJQQkNPL2jQLgJREfj3PtMBbn2VG -0e7f0c2451e55855b4a90efce8db0cafbf04b26f 0 iEYEABECAAYFAk8kb0cACgkQ+zlz4UKpE6RpxgCfQDV3zq2N+zle1XLKoXGMr7EK19IAnR3llz7WPf2j9lqXdZjw4xtl0XBk -9c262b875047f9acfedb63396a262ab5a5b101ca 0 iEYEABECAAYFAk80EvkACgkQ+zlz4UKpE6SUHQCeJuJMb8+k8ynIDPSmcKHL/a5gD6AAoPXMns9HF2c3XwtS1CMRf6rcZp3e -1ba641824000abbf1b22000772815c24e9b5f6d9 0 iEYEABECAAYFAk84LSUACgkQ+zlz4UKpE6RlPQCgiGZbYJFbeWEAdehVUrIZdU7sRe4AoOgESbvEWynP4X0OKbvdC9rLchYl -41e071bdd6ed970887d4ed4da59fdfa62003c39e 0 iEYEABECAAYFAk9FXikACgkQ+zlz4UKpE6TB8ACgt/VSo/kJMg9UVLKd5UUPBPjbgOIAn0DJuOla9GF85mW74sEkCOqE6Ada -a45710f8db303c400200017242589562936fcf1b 0 iEYEABECAAYFAk/l/kgACgkQ+zlz4UKpE6QCkwCg049BpW4kSvaKuICyvKokeoXbNiAAoPWAaiDuK6xjZhqGSuuGVWMmCRwk -52d22b060501ab90a89b3a758aca8ce70ad05231 0 iEYEABECAAYFAlBDfn8ACgkQ+zlz4UKpE6R3GACgzLiZ+fyM4Hx8/Qp9fyWF+mHk4FQAn3P3Y06AHadVvKwyksrAgKk/33LV -384fcbc92366ca0108b6c0984d861ffef2d38570 0 iEYEABECAAYFAlFRsM4ACgkQ+zlz4UKpE6TYXgCgksacYvWJ5dhx9oYFRR+oSH6wPgwAoJ3QO01zfiDbBz6Z9Mxy7tNi3jx6 -0bfb6ff650be2d003af3d0fc6c75be16369605e1 0 iEYEABECAAYFAlFjCYkACgkQ+zlz4UKpE6RldACg77Rp2I4vYUXpFakUrYq6uSfPLLQAn266JL6CiQG44cSroW+Mgz4CZgJn -4e0606f5f5aab87855860a3eeaf4c9eaaea77f09 0 iEYEABECAAYFAlHuizwACgkQ+zlz4UKpE6QphACg4FNFwvVju9wk6PC6vwkY8cZRtvkAn1nDR0pbto9xMdMUqhJxOc5Dqisr -eed93df350a6cc657d5151bd3aa29ab427fba7cc 0 iEYEABECAAYFAlI3Sy4ACgkQ+zlz4UKpE6ShLQCffDunkSEo5TCnzCx8PjVF9jetDxYAn02ZCfDJ2UPgojF+gjhHCGk9haFq -22d57e3a2b378a34675982a77e6daa643f38fa6e 0 iEYEABECAAYFAlKyO9QACgkQ+zlz4UKpE6QO/wCfWabZRMDkk/vNME1LK1cHCp4oOtMAoORYSAU8OTOxjhPW3IGDMFShHKHv -c519766e3ec9a60b1960dcb008f01434f98a17b2 0 
iEYEABECAAYFAlSoTtUACgkQ+zlz4UKpE6TIoQCg2nBKrFlaMtD1P4H1KuDxQJBsDkQAniIdmVBVhWvBU+pUfMHhPRBY+puR -ba5aff64b5cbe818ddabaac924d0bee6ab27f1b0 0 iEYEABECAAYFAlSq+v4ACgkQ+zlz4UKpE6SfvwCg8cL68fxxt7k/yzTV7hLNyOovci0AnAoPXmKEYaoyWehwMUpxOitaVRwf -7d31b04e79134d276c1e8a3a64ee35b7002da1ef 0 iQIcBAABAgAGBQJVVO4yAAoJEGE7GvLhImG9r6cP/jMU8jKHKbFiyRLCz/IXw72bnORdGiOwZzIjFyRSpXnrZ9dkIF8Hjllv27XW2jiQ2eg+N+MQmchO3VAqNEgad782535p01LY2hmP8s6LAKM7GFCTi6yCVcavcGUS8GDwK1df1nLK0Sfi3TrRsaduhizd0BI0MPuVt2qjDE+8AA0/6DkIkPsohUbvpJXMMl8BiuZBM3IViHYn4janRdeUdSvv9hDo3gYqMH9OsihhacOVX1KoHirkeO14JGfrTN9P7wgtQeIa6VP/cC6ek3qsUhahGXqFPvMw5oApcGyBMmVdfw4rgVVCgVKK1XRLGstt1JozgFIB9Dcjppjcv5VnawuDBvrQDNpFChxyAW8coyssKYG4Mug2wpoJawsy3Mb+rmDyw5KHXJXdWMS0uf+2h6+6FG4Y+DDb4LM8PGgSilJPktS7f9CqY6pROT4bPyG0o0z2VNa+3pdnQ3J4LMap9cdhPtTArvc0S/GwxrffRzKlXZW6LH3Apu9dn9dVwf+fUr8yui2DxNaZ/l8u5dYOixbCOp6rFSdHq/SYKOMfi3DrvdoWTBrhsUfI3ulJQxa13fFWrKVGOcEswjBxnaYEd7sIBt3ij/z3/1bCz9Phhp8N8u+5wQjbHhLrVqkb/u0I7lM6WSG8o7zg5abeotLbL4ieDsO/BBw3WuKzZ9ylie8h -57291f1e96b95a2545f98ec95ba1b01f8d3a0cf5 0 iQIcBAABAgAGBQJV6LWaAAoJEGE7GvLhImG9TMEP/jGHXPtiwWWb1xS+hL1i7b5J13IjciOHW+hGtp4lFb/J1jtF4o3JoPDdq+q1Ytuc0zo/lcYU73kw2gseGgO96MIEFdDcdCS1tbE5EP8456ADCn4TKykSSCdIuBXizhh/CTIJyry7i8VXpio1K26Uav2J2M2G91IADqmg2AWFtHmboGmaGRwU4TMuZbZPMFkiPyhFMMz9FH9VhVOEqF4KaEzUQM3RyKsfJ9RvJk7g3oxBS6vq/bPzQq3LNXVqirKfx4kSv8Rv+dyGHadKfdhigTXDWfzplnmuDcmOvhIcEnUsgPQyoPFfKM6RDaaNswFaLAXrGQXirx5hXDUhehXYjBuRB5iF372AACcnRJUJHV+mdW9L5jmJw64umZ7FuKOVqojumMLIEj16nz7ucAJpgOwbWKgLiUk+6vVr6QknjNYC6FDlgJ04nYfjovbzrT+HCC5UAVRBBX+w/khybhhsvvZUIZOzt6RPkriin7NQi3LST2ZN2AOolkDtSJd6esExXkUod7qGfTl/nKa8qWpeAQ7XSq+bv8/Wbj/bqN7kIDy6qYcy2J+aL/PNdrzuOSWKeQrOWhsb02mlsxC9bmRBEWJ1WbpdrnX7/6aVuPwF0LKsftitkFR6IqPza20qUebz+UF9Pd8lW4qn28BCRtwLprw/Oh0Qct1cVE9OUiB4GVXP -da42b972b5ab3381b41a3a63cc2a65b42e5caa05 0 
iQIcBAABAgAGBQJWRjUaAAoJEGE7GvLhImG9gFUP/34+eviBFlK2TPDBAp/AQz8aQp4dcPBZ9S5JCCXW1c2YE+UL6X7MpkRR3t/eXrzBJFSgiXmB+TzTkfz1DsFKKoAXymq5hP5AIf+5dpkvL+JH24f/+Jzv3qaNWYqJbUNYajy+GXMI8OGwmQ7x3EtynJmYpMVWdgtjcfCRGVRw38Zun+ePiluI83K/I52RptZenhcQP9I7wehdUtCp8bH7LX1nbeHH/HDY5OmkN22HkFzkPPLjYFgAzNfciZMI7bmxmTbLZ1wqGFyTHjGONEiKPW8vgnMK26QXm+/+DkPkg0RwqeA4oUwlT5+8m/pBlzJBY+Boz0+ffCBxpHOSto04hP2rCcBd1hihr6OWtZiZJ1S/YMsKW4vnZoIBVDr+z7fAOaLkZ6GX580BtoVH3Etr7/727ebaWYQfPknlAPn6lkO271/+r8X8GlTqxqlF/gvq5baqCLXvdjIgUgJAseuf4RWsSef+GxMaC/w9cScoqnr/v3DAcTKPY4FdomDUlEp/3HcjzothsXIDifrH1FhX0NjPzAMMvQm+jOsZWF7Z0ipfsPQGjx7enOdsUiUQzU+pYxiIZHdZ2vpkALFB8VhRB8QoO0hnyORLVrSqYHNQ+UdcV2lwwThi6qVfLjT0gKuxCG2e2u3pGvv28iW7nk7SYFCpHCRtaEpZh+4VDa8GPAOj -75d4f016a17f58cb049f1891aa4794c3308dc274 0 iQIcBAABAgAGBQJWRjdlAAoJEGE7GvLhImG9cbIP/jSrGQnXeTML/pYtcVj/3DigVd7M03MHAX1hyIz4cFCE8yZHXkOzMgoMe+47OoC+bRANvmh9zJcgVcgIbA/ooXFP2AiiutH5aI20mKES9N5bTqEPyiMACqjs1eb4ZIBMbDEt6UTD1256l5xd9wCBVzlXahuNQN5FyDMxFyrKcsWRoB/vW1ano4jT+1+R8SkSJzf0reJaooJAif4HHM1mwRsgepWFH91dT766m63/jZV8TrHmQHxh+jrCCDhBtZCbrrYEq2FTzSD6ZyBYIKa7lGbJaDH86XuAnFGMszDAkdTGxp+riWmpPfmssh9e17aayzoG5wLWGKfRgiV7/18YuYBzFnbnyZ+VPep5XKnm20L08T3WPId/nK3IdnShROLLm/B8MIxSOlmLYouFGuWQ9LP9Wpgsk07qDRtA7W8R2ooQI3F3iU7UIspA4oPO/P509wVcTJpf1WSnfkJ3K/yRifiKFL+FLlklXF+B5HEZttRzmjzx8/Qvn9lMfYh5pzqhDGxTkt1L5hftEtxp5inWtT9a4HPaG/jcp8MJgmS0eXmw4hTXb1gKQmTACJfZSiitSWCwvaE4oIoVXJ6HZZUCEfHNlGxAQ643AaApNeOCAe0FmzcXfyuCJtwhM5lDXgPM7sWZuKsUxeLElQ2sWXLDsNvQ35yr4wKsi1n1hMU2DbX8 -8beaa5d72670b40cbcaaf11d77a27cb9655ca00d 0 
iQIcBAABAgAGBQJX406jAAoJEGE7GvLhImG9iVwQAKBeA2NODvHZLBLFjheeRBMjRbSGWV8lscY/bNnSiIu1n99tLjnRKXszAXowUJnYVa14IWB9U56aoNdc+yWm1e1V+x7q6UXLkC6Jjecra9pfxDmW2VTs4o9D1wL2IVVOOB+3XrgF4N1Jb9TyhbF6ya6kdt9UYHgtMYwL3+fe5s5cTjCwoQNlS9L4drmCTGndtE6CTGrW6I2+S5Soc34QhDp8+WVDi6BTtHNDz1QkK3sO2X3MIJZcfxLSeWegR1JaZ48/dgensvmDFTAnqBf66cjJpjBkhwotqfA54G5M4xOcqKC6SMCJ599UpA+RBs4ntBGuVeSoLyFDpoebrMF1A/xFRfzcnyQLu0/o4LJYBb1+XNUdhrfTLmHxTrgPB4z7iJuNOfgz8sTvFRd4Ip/2hq43JCTFVNpv6d1qFCgf75WAXAqi91LaUpNFr1DoUsXlm4OTBB2PAycGF5N6E4YQDUAdXp792k7DFJJ+n6zHxuhDP6dBbzJbHzWrri4nAQDO1O1RhSjOSgIMadPs8UsOWf/WhvZPJ5TVUJk9bnnSoKMa+CWysg8koxwFeT67EAAZOdeKDKgqomw4Rb76fGlAjVVR+SJZx522I8SY48cc0tVVJyeM88I94WdSCnOupSvrcwEZNeA14xbV//alAN+odUR0ffMPb4KBOtIBQo1Q2OdY -838985377b4829c61b45bfe61d2ec76e5f8e5672 0 iQIcBAABCAAGBQJYwyEyAAoJEGE7GvLhImG9srMP/inukxZyEl/ZyA+gpqlWZegYvrE/Pyd6IinGIAzehbjuiixizZZMf35FYkF33TjVGKTBZyxeLV6UNaQMf6+cM9JHdLVf9HWbLcdCYelQtyvGpJvloVXK2twNMg0Gd/PP9nXaMbbGQ2a4j54zRpOZ28W37hM0pH94GNtRuN+wy4scHtFAHewK9K4GQWU1APf37EXq0Aoxf0OUs0BK5To8EvI6C9nCPpoD2VlxS3i+2UhLMogRhlYw7zBpBqZkdKnhRWIDP/Oc6WfMsxz7St5E7S/V5Lk6+iWnkdmdXSuWiUByWUckPGaSXa/IRa0S9LhBeUmZkVTHic//nWOA8uib8iaT7YlU9oSYmba4kYgHsUeNk035v9f4z6yJdxNdSrqPGtRp3EHGsCC8XuYzew7W8MwPVaN1CsscOJZKRAGNFJrwEMzo5pcg7dk0rJwalCwOzXAVNWAWAPw21cl3H03BsR5lQBDeZdjBbu37OWMFy/LYKQN3Be2znb6OHWla7FbsUtuFKesTGII56coSQVkrFgdoySOwsFd8V3DlTbYPPfd9SZyxwMgmyQzwVrBU95SrsLeQ4/WiEG9ZEr7Av+VOO+B3FFahED5Q7Bv279PMEhMWAh08zXz5/4OUMzvrTIhuYnjT2DKyqjxzkpif529zIbn3vOiK4ugK4pL7YVrnM9UV -f275e318641f185b8a15a2220e7c189b1769f84c 0 
iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAlk/FR8ACgkQ4yEXIpU5F6PobRAAl1JEPMiyMSWGfMyu/h3OtL3xkOpwcONp0ySv4DQHjOh3u638FMWnEUMF+7pRTnlqQssctH4vO88gnhz5XJOfFtisn4xg64gT0JhR/OiOSOmp90pm//8cXwocjwoOotFKAHxM96b8KWSyGCoFXS+FmMmhCvKWxAp4+qwW133DdaOcDdQeLG19Dcp/ffKGEt83NSoNBCmmG2WsQ11TGAp5Bj8aK+4844HMZFGpvxK0Vl/AecWcxkuB9ql6cPZS0V1Z3Ndwh8mKPkrtAZgPFjZStLXT3iCjEszsQmD6LUQ3x5hWGtPODZpo7uWhi9jFrrcHEGO7/u6l8T3ho7UWqJ1lp6xapCeZH2ZFxKtVar9RzjRB0kvtkwjUBIgpJuVZJCHdTfivJkLWPeapDjiJ4P4NiVMef0KLRDAF5EHO4VYasU754U2/GSkZBrmQgHyYUX3x3VDkPPglfhEphLKMTY3wvM5+EnuO1DDGl0aGGsx+yI/RyJVzp5+jImEdfKHrXS2OTVFB9CGR7t8gcIbrbVaiGrTV4WJPn9Qi4RIoXCizd0rtrBEkd820dGtPhSpOkPY5WNrh9I2B+YkoL1OjR92qZsnx6ByIyJlwvg/A1qZ8PaGVjFk/XkMJnJLOwDC7nVZNs2x4+w91qyG40WcO3FycLogvBp6OsQ2rNSJ1A0UUIhc= -fef434914848ccb2497776324dbf0850852b980d 0 iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAlpWN+QACgkQ4yEXIpU5F6O+/Q/+LkYHQHS2sbU+vKlCSMjNCl1wpc1MZ3obh2I+gjjqN5n+3QPNrZC+XMLPlfC54H6bsbuPo9r22Mln574RmX7W2ckc0OHWzNaCDzYLPI7NvnTxyLsVtM7lUvdkuK1YgOAqZE5uK62Dzo8F8Ou2GNUMRH+nsVgCEgT4liyYhSL3NoNlE+h4RAYnvkkBt8NpBSB661wNBZCkL2DxSzidwE8DT20gnhb5ngiRwNevP0DbUnZIs4CUxzkKgnZdyOL8cTftAGj/XnYxm0I9+rUBl0lfJrLhlw5sCtc+vMUHarF2g5CCyvT/dpeACkjOArDj7o3rgqEvwX5zoRUeaUbVv8k7CcJ1uu5R0G+5VLLs9o/a6ilC37tQiGkl4zDJtD7G3bQs4hxYNVvZEmj/SrebNeOjJkouNsIKWJ2tfVnAyI5hZt4+jNZFET8RPAsTvgOLI5u17zU90O4KS7RLuzcT8TxLb55CkKSKsn1qbn8WdObZsoOvk4VQA9Dek7ZH8ZY9v8KtHAzDH+ip5xc01doEJziybz41fibTVma5rgbvnNXXCMqPRRkkSE+k7ogLgC6R3kCoqZUu1V6qoMkPh9s3WzQoSBqGL17l9RBCTh2o2QriBxZNPS2jG+AUrFOKGusc9M8c1AcEO1Z2tbVZBEzKssOSQ22CGpO13ZVzKFJcs601qgw= -ca83074366ac1610134deaee6f12e3a58981e757 0 
iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAluAfD0ACgkQ4yEXIpU5F6O7tw/7By3wuM1HaXnVADqL66w3xGm15GV5zTZCkIY/lx3s1Lfg314dWRM3V2X4gI/dhvnQZEpo361kmT9cVGs0ggN3uz/jigX1Anjy/5t1L3mz9OBWWHd9+NXCuW3QzoChLrFyq7QD9u+tYdls5mGDisB5PfbSukc0j+69N/4jIPD7kABP//eF8auaWGN1WKs65xjPgXoATkV6FjQL9wLjnzuLnXOVQspzN3G/YwzlJTKQ3CH/7UxffmTLqWGrhLZcwDlCt7QYmHxSVmxBsjQ1tQcLWYjDAdR+nNTSBB9whE4zW6rHsB0Qs7gtWypmTKrKZ7ikRQcFGj3enlK+gzspA+rc/NGDHTGYSSbHS1BhE3SpOD7zyR06UnrsYGk2M6Cg5cvE/9RGFUIZ7MPbSfW5a/RRV0ZZgihJjkvsL1w8rzsl/8eaXi7Nkp5KsgEb/hQ+o4V+TEBIxe+TpOZDjpvPwTHC5f0rbRLdWxTxVpUteHwps4N8I5S3+HrMNw+9ewB92V1SxUVMUIBYKoiVJSo92eVnaw/GKIZduZrrr8/XzKh69gvXOrshucxH7AsIvGo4WQR6VtWeFrtUrQpUXPJ1fXi7nc6ciksIMMWKCUaMDsv0nN7k/kuL4m/NRAjCDlbZRoJ0Pj2+ukiGz8+GOhXjshkoj4oatX9UMp4mueDEWdl5JIQ= -71d5c24f937e00c2348f8d5b9680b9abe8597618 0 iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAluEcKMACgkQ4yEXIpU5F6OLDg/8DKSI8HzShD47HQMiqMtRSanmlJz2qrKea9gY3Y9HgXnIODTFUdi1v8CLzOU/NyD+GtYl8AiJSCUQfT2W0IEZkf7AeAsliq/0edPKQvCbHbjdOcWTTe07x8E1BbQoP8sS8EyYj3nbjQrYaTu3qKxOIwYhN9h8DP6C4xDQeGS05kdMcEbOXcvL0wNRnLW6mBJK2fqNFKR5BSIWgZUlqqYC90URD546D/Y5a+zR9tzsxiFXP/yKkDKVXoaFWiMEt/PjClgt7pcaCiyt1ZUt0N398/oVLPrIgMPOSqlbQB3eFYcdx3I2DpWIvWm9NjUe7FhkJfRUOQEnZ6IiqiLb6UwaRY1B+yt7SVOPygv86B1sACwE2G9+tcBGaIcKILdxD2HAt1nVsxVOKGI/9prmrVGrIzX6AX4phPpVS+PZRbd+CwI+dcGYXIjhVs+oy/6G+soo3ayLqqzWgcu+kC4cf39mVr7kNLGGbg15xJUmMp49WEk8/tiLgUDySdcCJAjADzWFvQrjy40JMxmt8dJPZwHhIBUZ0mXsGZWD0OVunQT9B3yswRj7I4TV/ztyfNYN7OtCyzxs1gwzrKiS8T/COSp7cb3+hXXoI9doWOPTZbO1Na51ch+4nqR6mtOCOmR7sCqSXw/VRLn97DsPK0miKbEQUm7mNT/XcxWPwHsWUI3yg+Y= -bbf57bf7e58354bc8052c9914da6d88940c0b493 0 
iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAluF62EACgkQ4yEXIpU5F6PEyxAAuA11YAWDZ6Bqag8h0IpxgbT9kcLZPo9s4XhQmFnKN7Ykga+UZK+mUs/pjcu9zk/CdZwEzLATtNiI9Zyvf4IkGkyNVj4qU4eVtvndClzflPP54fW/mxQCrALsIT7uqZQitZDomOJUnJPmbPni/dSjcbr/u77qh0rQifcABga3e+R3AmY8BSfsJbkEIV5A1wAC9O5ccCJRubvPLsQiQ0OhaMA+xjMtLA7/DVS/C9PIXexBrCK1vFtMkPN9RzPLQ3Y4hAxMwrYuMZ+l7l+Zngi+yIFEDJrTOIxAfP2dSxTrEwWEL/JfRPaN7BGFR9j/RG3KfG6aTggPIRc0ru4fAFVXhQ1zWPBYqnw+w8JM5w8A0vNuIV2fSQe7AmpE4d8obXqRKkvHTWvXQ1nqNbTK+X7DTLO+vTlcMbI7YJUzVZO2oiuJt5ZJ1irHtr/eIwgBEfLV7GvhmyZT74qTRJ8v+vCko0thjrfVOs/Fstfw4PB2QBb1f4LifKSL1Ol3yvygbLdbxXZr6SDjXUcG09ABb5Xk6yyVYn3/DivMGyNqU9e9ZK39i76Vmh156Ml/MHmA6ZQFEA0zAWB47imnkqigQHF/CEJW9yYEHAK5UXVC9uQcHboFNFHAbzRiDcaTCjBNrFk5vglIQG+IZR30KbYfkODcjp/NGekppii8fjLioq8cuL4= -6f611e78845adc38eac1fffe4793bea2d52bf099 0 iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAluRxokACgkQ4yEXIpU5F6Np6w/8D4R+QDUJN4mPGlbQNOSB8ew6do1ixP+uMZtMTgltYtbX+Uf9vNgCM+rmy0Tb+HjkMeNy47YZXaobfZ7ejmT/pFt9WXEr90mADAwrWcbqCCKC9OVmQG3tQQT2BGqxKsaB3t0s2+tNBGXs5o7AzHjqzain1nBe19y29EzpWfYusRA+exvIYWk8hf1JpY0wAaNIVBKdALSNcR/t0msHJRBkn2UxB+0e5te+A09atd0K1sXp9qii4WvGZBGJtpiFVK8Ayk+7Q2/RxqYoyJoIR2xcoZTA2e1no4uRjXa0ZTphPQpsDEJEwAQ5ymsFTtWPapWoJIgeyGopPckR85GT3c7MgkOGLLCJs1lKeQGWevjWquHV4lP3NZr4tmnKC9BHoqc16tlBbtbuaPXyGRkBP4ROn3NIm8rftixDGQtTZwlaNzJ8OoXCqj5PzTMUO8zbgVv9QrEHAcpxB+j+tINB5kdc4vICH2zoe0J7jK32ynwY9JH36cg8GVLkNIwzYWP5GKmeFm1GQKvv7Ptqfa4rTPKDy2G1/yTCwrzpg5vRNy+7ouhHRrVAxEaIvehdoYs+3K+SwGzgk0RoL0sw0n54zv/svTtIugO1inE9tKA31cuPujIcIrWPthHBY7bSPO18HGyoHkOcaS0rkkwQ0Ug85aBtyuFLjPUkUt4NL4kGmHOALFs= -b86eef21886cbc08a29dbb1893c68c360fbec7cc 0 
iQIzBAABCAAdFiEEoYl52o0gA4yRUmHQ4yEXIpU5F6MFAlw2ItgACgkQ4yEXIpU5F6OQEA//Y7qc4spxKxzAUEhXl8c1DGJ2CCHS1vMNoXTTgpCYGk6oDTCa00bHa4dUTyGQXUAbpjcAYD5uiyB8VTj1a8Qy7QFFPXofWx+Ljyfi3hx8isfjal7ktlHh7Y0PvnYBEH8a9zK7BUymDXF4n4qIGfKA3wgDB427yHGXlApIvXjmEk1C08GVzKCX03xWhAhOJyRk2aKwwlyZfYXJvpdhk18sI1DNhR925iz/e/wcS8cO0ESWR7gzTZrWPm4N32q3KoYWmWwp2msb3JsaEWpMdbY2/JJmwx6kkOwtn4GR+G4AujY6d7/XQQ8Yqtsld7x5LK7l44BG6RpHFgRWKZpWStxxp+VhuVpqZekXNuyfP1MIlR7w3B1p0wBIWekDGO8eEDGoK9TewzufZzJa1uCed9JulgGtvlcvpyIghwODLGjbGBr4YztPf9W16iOXt0Mtx1a9ni3C9xF3KgakvYYkLY3osFZG8my3AIXgFps5fNzfcq4GbJIPx3PcF9ka0iP6114/3g92vXpVKlvxczwil+lqPYBT2rxc3+9JtW77bG7tQOllKjnfAiQ0BK1fMBZRVDzN4QU8jifyDwUQvjB4tZMvGzXWxRW3TmTKOdqp+s6hJnBma9lAwUerGa5wtk1xZhO1udJYsk7PMt590bxgUl+1PjKJ6AG1Sj5MhgMct9euex4= diff --git a/.hgtags b/.hgtags deleted file mode 100644 index af66c6936..000000000 --- a/.hgtags +++ /dev/null @@ -1,41 +0,0 @@ -7fbe4187e9e53e58baf6cd7c1c21e3a3c5b920e5 0.8.0 -da726282493c57b4ef8e5be1a21e98cc028fda4c 0.9.0 -1822a169c4fecac402335a64a484b5dc053994a3 0.10.0 -1822a169c4fecac402335a64a484b5dc053994a3 v0.10.0 -1822a169c4fecac402335a64a484b5dc053994a3 0.10.0 -0000000000000000000000000000000000000000 0.10.0 -de10b5d8e4429d22790976ec4de89f209e882906 v0.10.1 -3cb8e57c6c80737c714dd7607a144ef12074c4fe v0.11.0 -da726282493c57b4ef8e5be1a21e98cc028fda4c v0.9.0 -7fbe4187e9e53e58baf6cd7c1c21e3a3c5b920e5 v0.8.0 -b767401684d8a4051230874b0686a54537b10e2f v0.12.0 -21f84883e5c206a3f2890905af68e08a0046ba1c v0.12.1 -88bd78632f86f696dd3fa8904c1d3180216378cc v0.12.2 -7b2da7e0815cce834cd60f9747209923952876ec v0.13.0 -9e60b2c477cde450a088161ca8f3d72b52531aaf v0.13.1 -c79cd308363d614f7ba32fd86294c9aa3117c361 v0.13.2 -634e0a42a1010fc1dcd279fb28506873a47090c1 v0.14.0 -2d83ce956f971c3aeb145c9ad68f426e78b852dd v0.14.1 -065fd1f0e9dda58557de0efb2deb138e93ba7632 v0.15.0 -4692c20bcbdeadd8a31283e234464c6e1c43765d v0.15.1 -def8f41a76726cf7239ff6dbaa2828a881f93451 v0.16.0 -30da9c169efc3985ad0464936483c229faba0e33 v0.17.0 
-78846e47d87b7ed5bb7397116070692b1cfa87d7 v0.17.1 -cfb2bfc0f66181e67768c4313bcce473292a0825 v0.18.0 -f97dd6cb4f34da6a62c4339887249115c7c25b9c v0.18.1 -22a361201fd1d387d59a066b179124694a446f38 v0.18.2 -01c42c68797e724507b76056b98981cb30748a36 v0.18.3 -94ef4830540d8fa74b8912118fb8065f4a6a3563 v0.18.4 -94ef4830540d8fa74b8912118fb8065f4a6a3563 v0.18.4 -0000000000000000000000000000000000000000 v0.18.4 -0000000000000000000000000000000000000000 v0.18.4 -f61127650cd00a1154c591dcde85ebac01f2be9f v0.18.4 -bd2aaa2c5797de78435977a1c60e450d6f22811b v0.19.0 -e5eb92cca97abc0c6fc168acfad993c2ad314589 v0.20.0 -deae742eacfa985bd20f47a12a8fee6ce2e0447c v0.21.0 -9a388d1023ec145cb00e6e16f3a8cabd3cc81d16 v1.0.0 -319c00d9d59e24ce06493715cff2701e3a2a8990 v1.1.0 -c80083c5e395451d612d43323c40317eb63bcb54 v1.1.1 -f54d10c5d98fd06d6fc70896107319901ae374ae v1.1.2 -c7035371f972982c1716daf61861b9dde15de03e v1.1.3 -11d3487e303cf0fc6af48086f3e9c0b1c8283039 v1.1.4 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0d8a36357..000000000 --- a/.travis.yml +++ /dev/null @@ -1,49 +0,0 @@ -sudo: required -dist: focal -services: - - docker -language: ruby -matrix: - include: - # i386: Intel 32-bit - - name: i386 - - language: generic - env: - - PGPATH="/usr/lib/postgresql/10/bin" - before_install: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes && - docker build --rm --build-arg PGPATH="${PGPATH}" -t ruby-pg -f spec/env/Dockerfile.i386 . 
- script: | - docker run --rm -t --network=host ruby-pg - - - rvm: "2.7" - env: - - "PGVERSION=10" - # Use Ubuntu-16.04 which provides openssl-1.0.0, which isn't available in 20.04 - dist: xenial - - rvm: ruby-head - env: - - "PGVERSION=14" - - rvm: truffleruby - env: - - "PGVERSION=14" - - allow_failures: - - rvm: ruby-head - fast_finish: true - -before_install: - - bundle install - # Download and install postgresql version to test against in /opt (for non-cross compile only) - - echo "deb http://apt.postgresql.org/pub/repos/apt/ ${TRAVIS_DIST}-pgdg main $PGVERSION" | sudo tee -a /etc/apt/sources.list.d/pgdg.list - - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - - sudo apt-get -y update - - sudo apt-get -y --allow-downgrades install postgresql-$PGVERSION libpq5=$PGVERSION* libpq-dev=$PGVERSION* - - export PATH=/usr/lib/postgresql/$PGVERSION/bin:$PATH - -script: - - bundle exec rake compile test PG_DEBUG=0 - -after_failure: - - "find tmp -name mkmf.log | xargs cat" diff --git a/Manifest.txt b/Manifest.txt deleted file mode 100644 index 25f43fea0..000000000 --- a/Manifest.txt +++ /dev/null @@ -1,72 +0,0 @@ -.gemtest -BSDL -Contributors.rdoc -History.rdoc -LICENSE -Manifest.txt -POSTGRES -README-OS_X.rdoc -README-Windows.rdoc -README.ja.rdoc -README.rdoc -Rakefile -Rakefile.cross -ext/errorcodes.def -ext/errorcodes.rb -ext/errorcodes.txt -ext/extconf.rb -ext/gvl_wrappers.c -ext/gvl_wrappers.h -ext/pg.c -ext/pg.h -ext/pg_binary_decoder.c -ext/pg_binary_encoder.c -ext/pg_coder.c -ext/pg_connection.c -ext/pg_copy_coder.c -ext/pg_errors.c -ext/pg_record_coder.c -ext/pg_result.c -ext/pg_text_decoder.c -ext/pg_text_encoder.c -ext/pg_tuple.c -ext/pg_type_map.c -ext/pg_type_map_all_strings.c -ext/pg_type_map_by_class.c -ext/pg_type_map_by_column.c -ext/pg_type_map_by_mri_type.c -ext/pg_type_map_by_oid.c -ext/pg_type_map_in_ruby.c -ext/pg_util.c -ext/pg_util.h -ext/vc/pg.sln -ext/vc/pg_18/pg.vcproj -ext/vc/pg_19/pg_19.vcproj 
-lib/pg.rb -lib/pg/basic_type_mapping.rb -lib/pg/binary_decoder.rb -lib/pg/coder.rb -lib/pg/connection.rb -lib/pg/constants.rb -lib/pg/exceptions.rb -lib/pg/result.rb -lib/pg/text_decoder.rb -lib/pg/text_encoder.rb -lib/pg/tuple.rb -lib/pg/type_map_by_column.rb -spec/data/expected_trace.out -spec/data/random_binary_data -spec/helpers.rb -spec/pg/basic_type_mapping_spec.rb -spec/pg/connection_spec.rb -spec/pg/connection_sync_spec.rb -spec/pg/result_spec.rb -spec/pg/tuple_spec.rb -spec/pg/type_map_by_class_spec.rb -spec/pg/type_map_by_column_spec.rb -spec/pg/type_map_by_mri_type_spec.rb -spec/pg/type_map_by_oid_spec.rb -spec/pg/type_map_in_ruby_spec.rb -spec/pg/type_map_spec.rb -spec/pg/type_spec.rb -spec/pg_spec.rb From d484c84da81fed5163677774cbad6d383c66f423 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 7 Jul 2025 22:28:16 +0200 Subject: [PATCH 060/118] Improve readability --- spec/pg/connection_spec.rb | 2 +- spec/pg/result_spec.rb | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index f98996e87..179c2afcc 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -2082,7 +2082,7 @@ def wait_check_socket(conn) expect { @conn.set_chunked_rows_mode(-2) }.to raise_error(PG::Error) end - it "should work in single row mode" do + it "should work in chunked rows mode" do @conn.send_query( "SELECT generate_series(1,12)" ) @conn.set_chunked_rows_mode(3) diff --git a/spec/pg/result_spec.rb b/spec/pg/result_spec.rb index 2f1a17fc1..74fc56071 100644 --- a/spec/pg/result_spec.rb +++ b/spec/pg/result_spec.rb @@ -128,7 +128,10 @@ expect( res.each.to_a ).to eq [{:a=>'1', :b=>'2'}] end - [[:single, nil, [:set_single_row_mode]], [:chunked, :postgresql_17, [:set_chunked_rows_mode, 3]]].each do |mode_name, guard, row_mode| + [ + [:single, nil, [:set_single_row_mode]], + [:chunked, :postgresql_17, [:set_chunked_rows_mode, 3]], + ].each do |mode_name, guard, row_mode| context "result 
streaming in #{mode_name} row mode", guard do let!(:textdec_int){ PG::TextDecoder::Integer.new name: 'INT4', oid: 23 } From 6002928e43d6a5df13889807240091e318834adb Mon Sep 17 00:00:00 2001 From: "Alexander J. Maidak" Date: Mon, 23 Dec 2024 15:21:36 -0600 Subject: [PATCH 061/118] try the next host on connnect_timeout libpq does not support connect_timeout when using the async api. So when using the async connection API with a multi-host connection string libpq will not timeout connections to the first host in the list and thus will not attempt to connect to any subsequent hosts in the connection string list. This fixes this by closing and reopenning the connection with a reordered connection string when connection times out. See discussion on the pgsql-hackers list discussing this "feature" of the api: https://www.postgresql.org/message-id/flat/CA%2Bmi_8YyGKA9dWELu63e%3DKL2oN-%2BFe4uca4EtFfb6uQD4Up8pw%40mail.gmail.com --- lib/pg/connection.rb | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 2c9ecd8c7..1b64410b5 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -680,6 +680,7 @@ module Pollable host_count = conninfo_hash[:host].to_s.count(",") + 1 stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) end + connection_attempts = 1 poll_status = PG::PGRES_POLLING_WRITING until poll_status == PG::PGRES_POLLING_OK || @@ -720,7 +721,13 @@ module Pollable else connhost = "at \"#{host}\", port #{port}" end - raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self) + if connection_attempts < host_count.to_i + connection_attempts += 1 + new_conninfo_hash = rotate_hosts(conninfo_hash.compact) + send(:reset_start2, self.class.send(:parse_connect_args, new_conninfo_hash)) + else + raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self) + end end # Check to 
see if it's finished or failed yet @@ -733,6 +740,13 @@ module Pollable raise PG::ConnectionBad.new(msg, connection: self) end end + + private def rotate_hosts(conninfo_hash) + conninfo_hash[:host] = conninfo_hash[:host].split(",").rotate.join(",") if conninfo_hash[:host] + conninfo_hash[:port] = conninfo_hash[:port].split(",").rotate.join(",") if conninfo_hash[:port] + conninfo_hash[:hostaddr] = conninfo_hash[:hostaddr].split(",").rotate.join(",") if conninfo_hash[:hostaddr] + conninfo_hash + end end include Pollable From e6e4cfefdfd0ccd4e57744392613244c327a3123 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 10 Jan 2025 21:59:59 +0100 Subject: [PATCH 062/118] Add a spec for connect_timeout with multiple hosts --- spec/pg/connection_spec.rb | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 179c2afcc..f92796c2f 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -374,10 +374,10 @@ start_time = Time.now expect { described_class.connect( - host: 'localhost', - port: 54320, - connect_timeout: 1, - dbname: "test") + host: 'localhost', + port: 54320, + connect_timeout: 1, + dbname: "test") }.to raise_error do |error| expect( error ).to be_an( PG::ConnectionBad ) expect( error.message ).to match( /timeout expired/ ) @@ -391,6 +391,20 @@ end end + it "succeeds with second host after connect_timeout" do + TCPServer.open( 'localhost', 54320 ) do |serv| + start_time = Time.now + conn = described_class.connect( + host: 'localhost,localhost,localhost', + port: "54320,#{@port},54320", + connect_timeout: 1, + dbname: "test") + + expect( conn.port ).to eq( @port ) + expect( Time.now - start_time ).to be_between(0.9, 10).inclusive + end + end + context "with multiple PostgreSQL servers", :without_transaction do before :all do @port_ro = @port + 1 From 233bcacbf65bb729145a3cdd9973864238a9e09d Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 12 Jan 
2025 13:38:24 +0100 Subject: [PATCH 063/118] Raise the error text of several connection attepts in case of several hosts and async API. This is what libpq does in sync API as well. --- lib/pg/connection.rb | 8 ++++---- spec/pg/connection_spec.rb | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 1b64410b5..572a2bf59 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -680,7 +680,7 @@ module Pollable host_count = conninfo_hash[:host].to_s.count(",") + 1 stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) end - connection_attempts = 1 + connection_errors = [] poll_status = PG::PGRES_POLLING_WRITING until poll_status == PG::PGRES_POLLING_OK || @@ -721,12 +721,12 @@ module Pollable else connhost = "at \"#{host}\", port #{port}" end - if connection_attempts < host_count.to_i - connection_attempts += 1 + connection_errors << "connection to server #{connhost} failed: timeout expired" + if connection_errors.count < host_count.to_i new_conninfo_hash = rotate_hosts(conninfo_hash.compact) send(:reset_start2, self.class.send(:parse_connect_args, new_conninfo_hash)) else - raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self) + raise PG::ConnectionBad.new(connection_errors.join("\n"), connection: self) end end diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index f92796c2f..2576a7f53 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -369,25 +369,25 @@ end end - it "times out after connect_timeout seconds" do + it "times out after 2 * connect_timeout seconds on two connections" do TCPServer.open( 'localhost', 54320 ) do |serv| start_time = Time.now expect { described_class.connect( - host: 'localhost', + host: 'localhost,localhost', port: 54320, connect_timeout: 1, dbname: "test") }.to raise_error do |error| expect( error ).to be_an( PG::ConnectionBad ) - expect( 
error.message ).to match( /timeout expired/ ) + expect( error.message ).to match( /timeout expired.*timeout expired/m ) if PG.library_version >= 120000 - expect( error.message ).to match( /\"localhost\"/ ) + expect( error.message ).to match( /\"localhost\".*\"localhost\"/m ) expect( error.message ).to match( /port 54320/ ) end end - expect( Time.now - start_time ).to be_between(0.9, 10).inclusive + expect( Time.now - start_time ).to be_between(1.9, 10).inclusive end end From 2f25db248833730e10dcd427a059b8b5689a7f0e Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 12 Jan 2025 17:24:31 +0100 Subject: [PATCH 064/118] Use a dedated TCP server class and a port assigned by the operating system --- spec/helpers.rb | 13 +++++++++++++ spec/pg/connection_spec.rb | 35 +++++++++++++++++++---------------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/spec/helpers.rb b/spec/helpers.rb index 3bab430c2..928076bae 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -482,6 +482,19 @@ def create_cert_from_csr(name, csr, ca_cert, ca_key, valid_years: 10, dns_names: end end + class ListenSocket + attr_reader :port + def initialize(host = 'localhost', accept: true) + TCPServer.open( host, 0 ) do |serv| + if accept + Thread.new { begin loop do serv.accept end rescue nil end } + end + @port = serv.local_address.ip_port + yield self + end + end + end + def check_for_lingering_connections( conn ) conn.exec( "SELECT * FROM pg_stat_activity" ) do |res| conns = res.find_all {|row| row['pid'].to_i != conn.backend_pid && ["client backend", nil].include?(row["backend_type"]) } diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 2576a7f53..05c2a05c4 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -370,12 +370,12 @@ end it "times out after 2 * connect_timeout seconds on two connections" do - TCPServer.open( 'localhost', 54320 ) do |serv| + PG::TestingHelpers::ListenSocket.new do |sock| start_time = Time.now expect { 
described_class.connect( host: 'localhost,localhost', - port: 54320, + port: sock.port, connect_timeout: 1, dbname: "test") }.to raise_error do |error| @@ -383,7 +383,7 @@ expect( error.message ).to match( /timeout expired.*timeout expired/m ) if PG.library_version >= 120000 expect( error.message ).to match( /\"localhost\".*\"localhost\"/m ) - expect( error.message ).to match( /port 54320/ ) + expect( error.message ).to match( /port #{sock.port}/ ) end end @@ -392,11 +392,11 @@ end it "succeeds with second host after connect_timeout" do - TCPServer.open( 'localhost', 54320 ) do |serv| + PG::TestingHelpers::ListenSocket.new do |sock| start_time = Time.now conn = described_class.connect( host: 'localhost,localhost,localhost', - port: "54320,#{@port},54320", + port: "#{sock.port},#{@port},#{sock.port}", connect_timeout: 1, dbname: "test") @@ -782,7 +782,8 @@ end it "raises proper error when sending fails" do - conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) + sock = PG::TestingHelpers::ListenSocket.new('127.0.0.1', accept: false){ } + conn = described_class.connect_start( '127.0.0.1', sock.port, "", "", "me", "xxxx", "somedb" ) expect{ conn.exec 'SELECT 1' }.to raise_error(PG::UnableToSend, /no connection/){|err| expect(err).to have_attributes(connection: conn) } end @@ -1688,11 +1689,12 @@ it "handles server close while asynchronous connect" do - serv = TCPServer.new( '127.0.0.1', 54320 ) - conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) - expect( [PG::PGRES_POLLING_WRITING, PG::CONNECTION_OK] ).to include conn.connect_poll - select( nil, [conn.socket_io], nil, 0.2 ) - serv.close + conn = nil + PG::TestingHelpers::ListenSocket.new('127.0.0.1', accept: false)do |sock| + conn = described_class.connect_start( '127.0.0.1', sock.port, "", "", "me", "xxxx", "somedb" ) + expect( [PG::PGRES_POLLING_WRITING, PG::CONNECTION_OK] ).to include conn.connect_poll + select( nil, [conn.socket_io], 
nil, 0.2 ) + end if conn.connect_poll == PG::PGRES_POLLING_READING select( [conn.socket_io], nil, nil, 0.2 ) end @@ -1816,12 +1818,13 @@ end it "consume_input should raise ConnectionBad for a closed connection" do - serv = TCPServer.new( '127.0.0.1', 54320 ) - conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) - while [PG::CONNECTION_STARTED, PG::CONNECTION_MADE].include?(conn.connect_poll) - sleep 0.1 + conn = nil + PG::TestingHelpers::ListenSocket.new '127.0.0.1', accept: false do |sock| + conn = described_class.connect_start( '127.0.0.1', sock.port, "", "", "me", "xxxx", "somedb" ) + while [PG::CONNECTION_STARTED, PG::CONNECTION_MADE].include?(conn.connect_poll) + sleep 0.1 + end end - serv.close expect{ conn.consume_input }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/){|err| expect(err).to have_attributes(connection: conn) } expect{ conn.consume_input }.to raise_error(PG::ConnectionBad, /can't get socket descriptor|connection not open/){|err| expect(err).to have_attributes(connection: conn) } end From e88153420ee4d34f4c08e8a42b0554fefac6a138 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 3 May 2025 09:10:15 +0200 Subject: [PATCH 065/118] Count hostaddr if host is not given --- lib/pg/connection.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 572a2bf59..2daf70ca4 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -677,7 +677,7 @@ module Pollable # Track the progress of the connection, waiting for the socket to become readable/writable before polling it private def polling_loop(poll_meth, connect_timeout) if (timeo = connect_timeout.to_i) && timeo > 0 - host_count = conninfo_hash[:host].to_s.count(",") + 1 + host_count = (conninfo_hash[:hostaddr].to_s.empty? ? 
conninfo_hash[:host] : conninfo_hash[:hostaddr]).to_s.count(",") + 1 stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) end connection_errors = [] From ea8a8daaa870750ea305332b45cda706a3d3c1c6 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 3 May 2025 13:38:36 +0200 Subject: [PATCH 066/118] ensure connection is actually bad in server_version spec --- spec/pg/connection_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 05c2a05c4..40ec53c96 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -934,8 +934,8 @@ it "should raise an error on a bad connection" do conn = PG::Connection.connect_start( @conninfo ) - expect{ conn.server_version }.to raise_error(PG::ConnectionBad) conn.finish + expect{ conn.server_version }.to raise_error(PG::ConnectionBad) end end From 661065636d40ee2ce17b1126f6ca55ce6c3d05e1 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 3 May 2025 14:51:38 +0200 Subject: [PATCH 067/118] Check hostaddr option in addition before setting the unixsockets --- lib/pg/connection.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 2daf70ca4..47104b24a 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -871,7 +871,7 @@ def new(*args) iopts = PG::Connection.conninfo_parse(option_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] } iopts = PG::Connection.conndefaults.each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }.merge(iopts) - if PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET && iopts[:host].to_s.empty? + if PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET && iopts[:host].to_s.empty? && iopts[:hostaddr].to_s.empty? # Many distors patch the hardcoded default UnixSocket path in libpq to /var/run/postgresql instead of /tmp . # We simply try them all. 
iopts[:host] = "/var/run/postgresql" + # Ubuntu, Debian, Fedora, Opensuse From 16918f859329d7adc15d9608128544e547ea3619 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 3 May 2025 09:14:32 +0200 Subject: [PATCH 068/118] Remove instead of rotate hosts that timed out When rotating hosts, they are tried twice, if combined with hosts which fail with some other error. When other errors occure, then add timeout errors to the libpq message list. Also finish the connection object before raise to avoid keeping fds open. Use a higher timeout on Windows, since the "Connection refused" error is raised after 2 seconds. Remove unused parameter accept: from ListenSocket --- lib/pg/connection.rb | 27 +++++++++++++++---------- spec/helpers.rb | 5 +---- spec/pg/connection_spec.rb | 41 ++++++++++++++++++++++++++++---------- 3 files changed, 49 insertions(+), 24 deletions(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 47104b24a..4420ec6e0 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -722,11 +722,12 @@ module Pollable connhost = "at \"#{host}\", port #{port}" end connection_errors << "connection to server #{connhost} failed: timeout expired" - if connection_errors.count < host_count.to_i - new_conninfo_hash = rotate_hosts(conninfo_hash.compact) - send(:reset_start2, self.class.send(:parse_connect_args, new_conninfo_hash)) + iopts = conninfo_hash.compact + if remove_current_host(iopts) + reset_start2(self.class.parse_connect_args(iopts)) else - raise PG::ConnectionBad.new(connection_errors.join("\n"), connection: self) + finish + raise PG::ConnectionBad.new(connection_errors.join("\n").b, connection: self) end end @@ -737,15 +738,21 @@ module Pollable unless status == PG::CONNECTION_OK msg = error_message finish - raise PG::ConnectionBad.new(msg, connection: self) + raise PG::ConnectionBad.new(connection_errors.map{|e| e + "\n" }.join.b + msg, connection: self) end end - private def rotate_hosts(conninfo_hash) - conninfo_hash[:host] = 
conninfo_hash[:host].split(",").rotate.join(",") if conninfo_hash[:host] - conninfo_hash[:port] = conninfo_hash[:port].split(",").rotate.join(",") if conninfo_hash[:port] - conninfo_hash[:hostaddr] = conninfo_hash[:hostaddr].split(",").rotate.join(",") if conninfo_hash[:hostaddr] - conninfo_hash + private def remove_current_host(conninfo_hash) + deleted = nil + %i[ host hostaddr port ].each do |sym| + if conninfo_hash[sym] + a = conninfo_hash[sym].split(",", -1) + d = a.delete_at(a.index(send(sym).to_s)) if a.size > 1 + deleted ||= d + conninfo_hash[sym] = a.join(",") + end + end + deleted end end diff --git a/spec/helpers.rb b/spec/helpers.rb index 928076bae..36643748b 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -484,11 +484,8 @@ def create_cert_from_csr(name, csr, ca_cert, ca_key, valid_years: 10, dns_names: class ListenSocket attr_reader :port - def initialize(host = 'localhost', accept: true) + def initialize(host = '127.0.0.1') TCPServer.open( host, 0 ) do |serv| - if accept - Thread.new { begin loop do serv.accept end rescue nil end } - end @port = serv.local_address.ip_port yield self end diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 40ec53c96..f93797210 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -369,25 +369,46 @@ end end + it "returns timeout and connection refused together" do + with_env_vars(PGHOST: nil) do + PG::TestingHelpers::ListenSocket.new do |sock| + start_time = Time.now + expect { + described_class.connect( + hostaddr: '127.0.0.1,127.0.0.1,127.0.0.1', + port: "#{@port_down},#{sock.port},#{@port_down}", + connect_timeout: RUBY_PLATFORM=~/mingw|mswin/i ? 
3 : 1, + dbname: "test") + }.to raise_error do |error| + expect( error ).to be_an( PG::ConnectionBad ) + if PG.library_version >= 140000 + expect( error.message ).to match( /127\.0\.0\.1.+#{sock.port}.+timeout expired/im ) + expect( error.message ).to match( /127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED).+127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED)/im ) + end + end + + expect( Time.now - start_time ).to be_between(0.9, 20).inclusive + end + end + end + it "times out after 2 * connect_timeout seconds on two connections" do PG::TestingHelpers::ListenSocket.new do |sock| start_time = Time.now expect { described_class.connect( - host: 'localhost,localhost', + host: '127.0.0.1,127.0.0.1', port: sock.port, - connect_timeout: 1, + connect_timeout: RUBY_PLATFORM=~/mingw|mswin/i ? 3 : 1, dbname: "test") }.to raise_error do |error| expect( error ).to be_an( PG::ConnectionBad ) - expect( error.message ).to match( /timeout expired.*timeout expired/m ) - if PG.library_version >= 120000 - expect( error.message ).to match( /\"localhost\".*\"localhost\"/m ) - expect( error.message ).to match( /port #{sock.port}/ ) + if PG.library_version >= 140000 + expect( error.message ).to match( /127\.0\.0\.1.+#{sock.port}.+timeout expired.+127\.0\.0\.1.+#{sock.port}.+timeout expired/im ) end end - expect( Time.now - start_time ).to be_between(1.9, 10).inclusive + expect( Time.now - start_time ).to be_between(1.9, 20).inclusive end end @@ -782,7 +803,7 @@ end it "raises proper error when sending fails" do - sock = PG::TestingHelpers::ListenSocket.new('127.0.0.1', accept: false){ } + sock = PG::TestingHelpers::ListenSocket.new('127.0.0.1') { } conn = described_class.connect_start( '127.0.0.1', sock.port, "", "", "me", "xxxx", "somedb" ) expect{ conn.exec 'SELECT 1' }.to raise_error(PG::UnableToSend, /no connection/){|err| expect(err).to have_attributes(connection: conn) } end @@ -1690,7 +1711,7 @@ it "handles server close while asynchronous connect" do conn = 
nil - PG::TestingHelpers::ListenSocket.new('127.0.0.1', accept: false)do |sock| + PG::TestingHelpers::ListenSocket.new('127.0.0.1') do |sock| conn = described_class.connect_start( '127.0.0.1', sock.port, "", "", "me", "xxxx", "somedb" ) expect( [PG::PGRES_POLLING_WRITING, PG::CONNECTION_OK] ).to include conn.connect_poll select( nil, [conn.socket_io], nil, 0.2 ) @@ -1819,7 +1840,7 @@ it "consume_input should raise ConnectionBad for a closed connection" do conn = nil - PG::TestingHelpers::ListenSocket.new '127.0.0.1', accept: false do |sock| + PG::TestingHelpers::ListenSocket.new '127.0.0.1' do |sock| conn = described_class.connect_start( '127.0.0.1', sock.port, "", "", "me", "xxxx", "somedb" ) while [PG::CONNECTION_STARTED, PG::CONNECTION_MADE].include?(conn.connect_poll) sleep 0.1 From 8c83e8fe0a0c778ed88fd55668b8c9f8f22f5839 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 3 May 2025 15:42:37 +0200 Subject: [PATCH 069/118] Adjust timeout spec for Macos and Windows --- spec/pg/connection_spec.rb | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index f93797210..33390aa1b 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -369,7 +369,7 @@ end end - it "returns timeout and connection refused together" do + it "raises after 'timeout' and two times 'connection refused'" do with_env_vars(PGHOST: nil) do PG::TestingHelpers::ListenSocket.new do |sock| start_time = Time.now @@ -377,13 +377,28 @@ described_class.connect( hostaddr: '127.0.0.1,127.0.0.1,127.0.0.1', port: "#{@port_down},#{sock.port},#{@port_down}", - connect_timeout: RUBY_PLATFORM=~/mingw|mswin/i ? 3 : 1, + connect_timeout: RUBY_PLATFORM=~/mingw|mswin/i ? 
5 : 1, dbname: "test") }.to raise_error do |error| expect( error ).to be_an( PG::ConnectionBad ) if PG.library_version >= 140000 expect( error.message ).to match( /127\.0\.0\.1.+#{sock.port}.+timeout expired/im ) - expect( error.message ).to match( /127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED).+127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED)/im ) + expect( error.message ).to match( /127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED|could not receive data from server: (Connection refused|Socket is not connected))(.+127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED)|)/im ) + +# Failure on Macos is either: +# connection to server at "127.0.0.1" (127.0.0.1), port 52806 failed: timeout expired +# connection to server at "127.0.0.1", port 23467 failed: could not receive data from server: Connection refused +# or: +# connection to server at "127.0.0.1" (127.0.0.1), port 52899 failed: timeout expired +# connection to server at "127.0.0.1", port 23467 failed: Connection refused +# Is the server running on that host and accepting TCP/IP connections? +# connection to server at "127.0.0.1", port 23467 failed: Connection refused +# Is the server running on that host and accepting TCP/IP connections? 
+# +# and on Windows it is sometimes: +# connection to server at "127.0.0.1" (127.0.0.1), port 52806 failed: timeout expired +# connection to server at "127.0.0.1", port 23467 failed: could not receive data from server: Socket is not connected (0x00002749/10057) + end end From ee11bfaf9d1ca6273c3e73095f8ea7fae22bb520 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 4 May 2025 09:23:34 -0700 Subject: [PATCH 070/118] Rework cycling of multiple hosts Until now the connect_timeout handling worked like so: - All hosts are passed to connect_start - If a timeout happens then the related host is removed from the hosts list - This runs until no timeout happens and either a connection is established or aborted with CONNECTION_BAD - The downside is that this might connect multiple times to one and the same host. This changes the connect_timeout handling like so: - All hosts are passed to connect_start - As soon as the host is tried to connect the related host is removed from the hosts list - This runs until no hosts are left after timeout or either a connection is established or aborted with CONNECTION_BAD - The downside is that this connects only once to hosts which are listed twice. This also fixes the "socket not connected" issue on Windows. And it harmonizes the handling of connection parameters in CancelConnection. --- lib/pg/cancel_connection.rb | 29 +++++++++++++-- lib/pg/connection.rb | 71 ++++++++++++++++++++++++------------- spec/pg/connection_spec.rb | 22 +++--------- 3 files changed, 77 insertions(+), 45 deletions(-) diff --git a/lib/pg/cancel_connection.rb b/lib/pg/cancel_connection.rb index 9d4a2d781..6375481b3 100644 --- a/lib/pg/cancel_connection.rb +++ b/lib/pg/cancel_connection.rb @@ -7,8 +7,24 @@ class PG::CancelConnection include PG::Connection::Pollable - # The timeout used by #cancel and async_cancel to establish the cancel connection. 
- attr_accessor :async_connect_timeout + alias c_initialize initialize + + def initialize(conn) + c_initialize(conn) + + # A cancel connection is always to one destination server only. + # Prepare conninfo_hash with just enough information to allow a shared polling_loop. + @host = conn.host + @hostaddr = conn.hostaddr + @port = conn.port + + @conninfo_hash = { + host: @host, + hostaddr: @hostaddr, + port: @port.to_s, + connect_timeout: conn.conninfo_hash[:connect_timeout], + } + end # call-seq: # conn.cancel @@ -23,8 +39,15 @@ class PG::CancelConnection # def cancel start - polling_loop(:poll, async_connect_timeout) + polling_loop(:poll) end alias async_cancel cancel + + # These private methods are there to allow a shared polling_loop. + private + attr_reader :host + attr_reader :hostaddr + attr_reader :port + attr_reader :conninfo_hash end end diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 4420ec6e0..7d10b95e9 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -625,7 +625,6 @@ def sync_cancel # On older client library a pure ruby implementation is used. def cancel cancon = PG::CancelConnection.new(self) - cancon.async_connect_timeout = conninfo_hash[:connect_timeout] cancon.async_cancel rescue PG::Error => err err.to_s @@ -675,20 +674,25 @@ def cancel module Pollable # Track the progress of the connection, waiting for the socket to become readable/writable before polling it - private def polling_loop(poll_meth, connect_timeout) + private def polling_loop(poll_meth) + connect_timeout = conninfo_hash[:connect_timeout] if (timeo = connect_timeout.to_i) && timeo > 0 host_count = (conninfo_hash[:hostaddr].to_s.empty? ? 
conninfo_hash[:host] : conninfo_hash[:hostaddr]).to_s.count(",") + 1 stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC) end + iopts = conninfo_hash.compact connection_errors = [] - poll_status = PG::PGRES_POLLING_WRITING + until poll_status == PG::PGRES_POLLING_OK || poll_status == PG::PGRES_POLLING_FAILED # Set single timeout to parameter "connect_timeout" but # don't exceed total connection time of number-of-hosts * connect_timeout. timeout = [timeo, stop_time - Process.clock_gettime(Process::CLOCK_MONOTONIC)].min if stop_time + + hostcnt = remove_current_host(iopts) + event = if !timeout || timeout >= 0 # If the socket needs to read, wait 'til it becomes readable to poll again case poll_status @@ -711,20 +715,17 @@ module Pollable end end end + # connection to server at "localhost" (127.0.0.1), port 5433 failed: timeout expired (PG::ConnectionBad) # connection to server on socket "/var/run/postgresql/.s.PGSQL.5433" failed: No such file or directory unless event - if self.class.send(:host_is_named_pipe?, host) - connhost = "on socket \"#{host}\"" - elsif respond_to?(:hostaddr) - connhost = "at \"#{host}\" (#{hostaddr}), port #{port}" - else - connhost = "at \"#{host}\", port #{port}" - end - connection_errors << "connection to server #{connhost} failed: timeout expired" - iopts = conninfo_hash.compact - if remove_current_host(iopts) + connection_errors << (error_message + "timeout expired") + if hostcnt > 0 reset_start2(self.class.parse_connect_args(iopts)) + # Restart polling with waiting for writable. + # Otherwise "not connected" error is raised on Windows. 
+ poll_status = PG::PGRES_POLLING_WRITING + next else finish raise PG::ConnectionBad.new(connection_errors.join("\n").b, connection: self) @@ -742,17 +743,39 @@ module Pollable end end - private def remove_current_host(conninfo_hash) - deleted = nil - %i[ host hostaddr port ].each do |sym| - if conninfo_hash[sym] - a = conninfo_hash[sym].split(",", -1) - d = a.delete_at(a.index(send(sym).to_s)) if a.size > 1 - deleted ||= d - conninfo_hash[sym] = a.join(",") - end + # Remove the host to which the connection is currently established from the option hash. + # Affected options are: + # - :host + # - :hostaddr + # - :port + # + # Return the number of remaining hosts. + private def remove_current_host(iopts) + ihosts = iopts[:host]&.split(",", -1) + ihostaddrs = iopts[:hostaddr]&.split(",", -1) + iports = iopts[:port]&.split(",", -1) + iports = iports * (ihosts || ihostaddrs).size if iports&.size == 1 + + idx = (ihosts || ihostaddrs || iports).index.with_index do |_, i| + (ihosts ? ihosts[i] == host : true) && + (ihostaddrs && respond_to?(:hostaddr, true) ? ihostaddrs[i] == hostaddr : true) && + (iports ? iports[i].to_i == port : true) end - deleted + + if idx + ihosts&.delete_at(idx) + ihostaddrs&.delete_at(idx) + iports&.delete_at(idx) + + iopts.merge!( + host: ihosts.join(",")) if ihosts + iopts.merge!( + hostaddr: ihostaddrs.join(",")) if ihostaddrs + iopts.merge!( + port: iports.join(",")) if iports + end + + (ihosts || ihostaddrs || iports).size end end @@ -760,7 +783,7 @@ module Pollable private def async_connect_or_reset(poll_meth) # Track the progress of the connection, waiting for the socket to become readable/writable before polling it - polling_loop(poll_meth, conninfo_hash[:connect_timeout]) + polling_loop(poll_meth) # Set connection to nonblocking to handle all blocking states in ruby. # That way a fiber scheduler is able to handle IO requests. 
diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 33390aa1b..0d76cd368 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -382,23 +382,7 @@ }.to raise_error do |error| expect( error ).to be_an( PG::ConnectionBad ) if PG.library_version >= 140000 - expect( error.message ).to match( /127\.0\.0\.1.+#{sock.port}.+timeout expired/im ) - expect( error.message ).to match( /127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED|could not receive data from server: (Connection refused|Socket is not connected))(.+127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED)|)/im ) - -# Failure on Macos is either: -# connection to server at "127.0.0.1" (127.0.0.1), port 52806 failed: timeout expired -# connection to server at "127.0.0.1", port 23467 failed: could not receive data from server: Connection refused -# or: -# connection to server at "127.0.0.1" (127.0.0.1), port 52899 failed: timeout expired -# connection to server at "127.0.0.1", port 23467 failed: Connection refused -# Is the server running on that host and accepting TCP/IP connections? -# connection to server at "127.0.0.1", port 23467 failed: Connection refused -# Is the server running on that host and accepting TCP/IP connections? -# -# and on Windows it is sometimes: -# connection to server at "127.0.0.1" (127.0.0.1), port 52806 failed: timeout expired -# connection to server at "127.0.0.1", port 23467 failed: could not receive data from server: Socket is not connected (0x00002749/10057) - + expect( error.message ).to match( /127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED).+127\.0\.0\.1.+#{sock.port}.+timeout expired.+127\.0\.0\.1.+#{@port_down}.+(Connection refused|ECONNREFUSED)/im ) end end @@ -412,7 +396,7 @@ start_time = Time.now expect { described_class.connect( - host: '127.0.0.1,127.0.0.1', + host: '127.0.0.1,localhost', port: sock.port, connect_timeout: RUBY_PLATFORM=~/mingw|mswin/i ? 
3 : 1, dbname: "test") @@ -438,6 +422,8 @@ expect( conn.port ).to eq( @port ) expect( Time.now - start_time ).to be_between(0.9, 10).inclusive + ensure + conn&.finish end end From b6c597793390d9f0e10ab9875d1971849c3b6c5a Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 10 May 2025 16:35:08 +0200 Subject: [PATCH 071/118] Add binary gem for Macos --- Rakefile | 20 ++++++++- ext/extconf.rb | 45 ++++++++++++++----- ...01-Allow-static-linking-krb5-library.patch | 30 +++++++++++++ 3 files changed, 83 insertions(+), 12 deletions(-) create mode 100644 ports/patches/krb5/1.21.3/0001-Allow-static-linking-krb5-library.patch diff --git a/Rakefile b/Rakefile index d2b16d919..820be7fa2 100644 --- a/Rakefile +++ b/Rakefile @@ -32,7 +32,7 @@ CLEAN.include "lib/*/libpq.dll" CLEAN.include "lib/pg_ext.*" CLEAN.include "lib/pg/postgresql_lib_path.rb" CLEAN.include "ports/*.installed" -CLEAN.include "ports/*mingw*", "ports/*linux*" +CLEAN.include "ports/*mingw*", "ports/*linux*", "ports/*darwin*" Bundler::GemHelper.install_tasks $gem_spec = Bundler.load_gemspec(GEMSPEC) @@ -49,6 +49,8 @@ CrossLibraries = [ ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], ['x86_64-linux', 'linux-x86_64', 'x86_64-linux-gnu'], + ['x86_64-darwin', 'darwin64-x86_64', 'x86_64-apple-darwin'], + ['arm64-darwin', 'darwin64-arm64', 'arm64-apple-darwin'], ].map do |platform, openssl_config, toolchain| CrossLibrary.new platform, openssl_config, toolchain end @@ -76,6 +78,7 @@ Rake::ExtensionTask.new do |ext| # Add libpq.dll/.so to fat binary gemspecs ext.cross_compiling do |spec| spec.files << "ports/#{spec.platform.to_s}/lib/libpq-ruby-pg.so.1" if spec.platform.to_s =~ /linux/ + spec.files << "ports/#{spec.platform.to_s}/lib/libpq-ruby-pg.1.dylib" if spec.platform.to_s =~ /darwin/ spec.files << "ports/#{spec.platform.to_s}/lib/libpq.dll" if spec.platform.to_s =~ /mingw|mswin/ end end @@ -96,16 +99,31 @@ task 'gem:native:prepare' do end end +task 
'install_darwin_mig' do + sh <<~EOT + rm -rf bootstrap_cmds && + git clone --branch=cross_platform https://github.com/markmentovai/bootstrap_cmds && + cd bootstrap_cmds && + autoreconf --install && + sh configure && + make && + sed -E -i 's/^cppflags=(.*)/cppflags=(\\1 "-D__arm64__" "-I\\/opt\\/osxcross\\/target\\/SDK\\/MacOSX11.1.sdk\\/usr\\/include")/' migcom.tproj/mig.sh && + sudo make install + EOT +end + CrossLibraries.each do |xlib| platform = xlib.platform desc "Build fat binary gem for platform #{platform}" task "gem:native:#{platform}" => ['gem:native:prepare'] do RakeCompilerDock.sh <<-EOT, platform: platform + #{ "sudo apt-get update && sudo apt-get install -y bison flex &&" if platform =~ /darwin/ } #{ # remove nm on Linux to suppress PostgreSQL's check for exit which raises thread_exit as a false positive: "sudo mv `which nm` `which nm`.bak &&" if platform =~ /linux/ } sudo apt-get update && sudo apt-get install -y bison flex && (cp build/gem/gem-*.pem ~/.gem/ || true) && bundle install --local && + #{ "rake install_darwin_mig" if platform =~ /darwin/ } rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.4.1:3.3.5:3.2.6:3.1.6:3.0.7:2.7.8 EOT end diff --git a/ext/extconf.rb b/ext/extconf.rb index 086481071..e89a3f2ed 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -69,7 +69,7 @@ class << recipe def configure envs = [] envs << "CFLAGS=-DDSO_WIN32 -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /mingw|mswin/ - envs << "CFLAGS=-fPIC -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /linux/ + envs << "CFLAGS=-fPIC -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /linux|darwin/ execute('configure', ['env', *envs, "./Configure", openssl_platform, "threads", "-static", "CROSS_COMPILE=#{host}-", configure_prefix], altlog: "config.log") end def compile @@ -85,17 +85,34 @@ def install recipe.cook_and_activate end - if RUBY_PLATFORM =~ /linux/ + if RUBY_PLATFORM =~ /linux|darwin/ krb5_recipe = BuildRecipe.new("krb5", KRB5_VERSION, 
[KRB5_SOURCE_URI]).tap do |recipe| class << recipe def work_path File.join(super, "src") end + def configure + if RUBY_PLATFORM=~/darwin/ + ENV["CC"] = host[/^.*[^\.\d]/] + "-clang" + ENV["CXX"] = host[/^.*[^\.\d]/] + "-c++" + + # Manually set the correct values for configure checks that libkrb5 won't be + # able to perform because we're cross-compiling. + ENV["krb5_cv_attr_constructor_destructor"] = "yes" + ENV["ac_cv_func_regcomp"] = "yes" + ENV["ac_cv_printf_positional"] = "yes" + end + super + end end # We specify -fcommon to get around duplicate definition errors in recent gcc. # See https://github.com/cockroachdb/cockroach/issues/49734 recipe.configure_options << "CFLAGS=-fcommon#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "LDFLAGS=-framework Kerberos" if RUBY_PLATFORM =~ /darwin/ recipe.configure_options << "--without-keyutils" + recipe.configure_options << "--disable-nls" + recipe.configure_options << "--disable-silent-rules" + recipe.configure_options << "--without-system-verto" recipe.configure_options << "krb5_cv_attr_constructor_destructor=yes" recipe.configure_options << "ac_cv_func_regcomp=yes" recipe.configure_options << "ac_cv_printf_positional=yes" @@ -104,6 +121,11 @@ def work_path end end + libpq_orig, libpq_rubypg = case RUBY_PLATFORM + when /linux/ then ["libpq.so.5", "libpq-ruby-pg.so.1"] + when /darwin/ then ["libpq.5.dylib", "libpq-ruby-pg.1.dylib"] + end + postgresql_recipe = BuildRecipe.new("postgresql", POSTGRESQL_VERSION, [POSTGRESQL_SOURCE_URI]).tap do |recipe| class << recipe def configure_defaults @@ -111,7 +133,7 @@ def configure_defaults "--target=#{host}", "--host=#{host}", '--with-openssl', - *(RUBY_PLATFORM=~/linux/ ? ['--with-gssapi'] : []), + *(RUBY_PLATFORM=~/linux|darwin/ ? 
['--with-gssapi'] : []), '--without-zlib', '--without-icu', '--without-readline', @@ -127,8 +149,8 @@ def install end recipe.host = toolchain - recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux/}" - recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,libpq-ruby-pg.so.1 -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/}" + recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux|darwin/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ recipe.configure_options << "CPPFLAGS=-I#{openssl_recipe.path}/include" @@ -136,15 +158,16 @@ def install end # Use our own library name for libpq to avoid loading of system libpq by accident. 
- FileUtils.ln_sf File.join(postgresql_recipe.port_path, "lib/libpq.so.5"), - File.join(postgresql_recipe.port_path, "lib/libpq-ruby-pg.so.1") + FileUtils.ln_sf File.join(postgresql_recipe.port_path, "lib/#{libpq_orig}"), + File.join(postgresql_recipe.port_path, "lib/#{libpq_rubypg}") # Avoid dependency to external libgcc.dll on x86-mingw32 - $LDFLAGS << " -static-libgcc" + $LDFLAGS << " -static-libgcc" if RUBY_PLATFORM =~ /mingw|mswin/ # Avoid: "libpq.so: undefined reference to `dlopen'" in cross-ruby-2.7.8 - $LDFLAGS << " -Wl,--no-as-needed" if RUBY_PLATFORM !~ /aarch64/ - # Find libpq in the ports directory coming from lib/3.3 + $LDFLAGS << " -Wl,--no-as-needed" if RUBY_PLATFORM !~ /aarch64|arm64/ + # Find libpq in the ports directory coming from lib/3.x # It is shared between all compiled ruby versions. - $LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" + $LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" if RUBY_PLATFORM =~ /linux/ + $LDFLAGS << " '-Wl,-rpath,@loader_path/../../ports/#{gem_platform}/lib'" if RUBY_PLATFORM =~ /darwin/ # Don't use pg_config for cross build, but --with-pg-* path options dir_config('pg', "#{postgresql_recipe.path}/include", "#{postgresql_recipe.path}/lib") diff --git a/ports/patches/krb5/1.21.3/0001-Allow-static-linking-krb5-library.patch b/ports/patches/krb5/1.21.3/0001-Allow-static-linking-krb5-library.patch new file mode 100644 index 000000000..5ef1c0848 --- /dev/null +++ b/ports/patches/krb5/1.21.3/0001-Allow-static-linking-krb5-library.patch @@ -0,0 +1,30 @@ +From e82c1b395162ea71279ea2170259383082e41ab0 Mon Sep 17 00:00:00 2001 +From: Lars Kanis +Date: Sat, 12 Jul 2025 10:55:17 +0200 +Subject: [PATCH] Allow static linking krb5 library + +Otherwise it fails with: + Undefined symbols for architecture arm64: + "_krb5int_c_mit_des_zeroblock", referenced from: + _krb5int_des3_cbc_encrypt in libk5crypto.a(d3_aead.o) + _krb5int_des3_cbc_decrypt in libk5crypto.a(d3_aead.o) +--- + 
src/lib/crypto/builtin/des/des_int.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib/crypto/builtin/des/des_int.h b/src/lib/crypto/builtin/des/des_int.h +index 46fed7dbd..114e48ebd 100644 +--- a/lib/crypto/builtin/des/des_int.h ++++ b/lib/crypto/builtin/des/des_int.h +@@ -159,7 +159,7 @@ mit_des_cbc_encrypt(const mit_des_cblock *in, mit_des_cblock *out, + const mit_des_cblock ivec, int enc); + + #define mit_des_zeroblock krb5int_c_mit_des_zeroblock +-extern const mit_des_cblock mit_des_zeroblock; ++const mit_des_cblock mit_des_zeroblock; + + /* fin_rndkey.c */ + krb5_error_code mit_des_finish_random_key(const krb5_encrypt_block *, +-- +2.43.0 + From e352fe4ba0e7fdcdcbb9910f4d3858ea314e6763 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 13 Jul 2025 13:31:24 +0200 Subject: [PATCH 072/118] Update to rake-compiler-dock-1.9.1, add binaries for x86_64-darwin --- Gemfile | 2 +- Rakefile | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Gemfile b/Gemfile index 8395dc1ac..d0237dd8b 100644 --- a/Gemfile +++ b/Gemfile @@ -13,7 +13,7 @@ end group :test do gem "bundler", ">= 1.16", "< 3.0" gem "rake-compiler", "~> 1.0" - gem "rake-compiler-dock", "~> 1.8.0" + gem "rake-compiler-dock", "~> 1.9.1" gem "rspec", "~> 3.5" # "bigdecimal" is a gem on ruby-3.4+ and it's optional for ruby-pg. # Specs should succeed without it, but 4 examples are then excluded. 
diff --git a/Rakefile b/Rakefile index 820be7fa2..9df31c286 100644 --- a/Rakefile +++ b/Rakefile @@ -99,7 +99,7 @@ task 'gem:native:prepare' do end end -task 'install_darwin_mig' do +task 'install_darwin_mig', [:arch] do |t, args| sh <<~EOT rm -rf bootstrap_cmds && git clone --branch=cross_platform https://github.com/markmentovai/bootstrap_cmds && @@ -107,7 +107,7 @@ task 'install_darwin_mig' do autoreconf --install && sh configure && make && - sed -E -i 's/^cppflags=(.*)/cppflags=(\\1 "-D__arm64__" "-I\\/opt\\/osxcross\\/target\\/SDK\\/MacOSX11.1.sdk\\/usr\\/include")/' migcom.tproj/mig.sh && + sed -E -i 's/^cppflags=(.*)/cppflags=(\\1 "-D#{args[:arch]}" "-I\\/opt\\/osxcross\\/target\\/SDK\\/MacOSX11.1.sdk\\/usr\\/include")/' migcom.tproj/mig.sh && sudo make install EOT end @@ -123,8 +123,9 @@ CrossLibraries.each do |xlib| sudo apt-get update && sudo apt-get install -y bison flex && (cp build/gem/gem-*.pem ~/.gem/ || true) && bundle install --local && - #{ "rake install_darwin_mig" if platform =~ /darwin/ } - rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=3.4.1:3.3.5:3.2.6:3.1.6:3.0.7:2.7.8 + #{ "rake install_darwin_mig[__arm64__]" if platform =~ /arm64-darwin/ } + #{ "rake install_darwin_mig[__x86_64__]" if platform =~ /x86_64-darwin/ } + rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=#{RakeCompilerDock.ruby_cc_version("~>2.7", "~>3.0")} EOT end desc "Build the native binary gems" From f06f4830d4c53ff9bed59b2197fafaf252a06591 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 13 Jul 2025 13:32:02 +0200 Subject: [PATCH 073/118] Add CI jobs for macos binaries --- .github/workflows/binary-gems.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 4088892b9..68cb0a993 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -28,6 +28,8 @@ 
jobs: - platform: "x64-mingw32" - platform: "x86-mingw32" - platform: "x86_64-linux" + - platform: "x86_64-darwin" + - platform: "arm64-darwin" steps: - uses: actions/checkout@v4 - name: Set up Ruby @@ -73,6 +75,12 @@ jobs: - os: ubuntu-latest ruby: "3.2" platform: "x86_64-linux" + - os: macos-latest + ruby: "3.4" + platform: "arm64-darwin" + - os: macos-13 + ruby: "3.4" + platform: "x86_64-darwin" runs-on: ${{ matrix.os }} env: From 7a10fbdd70488690081178ecfc5699aa115c3124 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 13 Jul 2025 13:48:15 +0200 Subject: [PATCH 074/118] clang has no option --no-as-needed --- ext/extconf.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/extconf.rb b/ext/extconf.rb index e89a3f2ed..6c9aea0dc 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -163,7 +163,7 @@ def install # Avoid dependency to external libgcc.dll on x86-mingw32 $LDFLAGS << " -static-libgcc" if RUBY_PLATFORM =~ /mingw|mswin/ # Avoid: "libpq.so: undefined reference to `dlopen'" in cross-ruby-2.7.8 - $LDFLAGS << " -Wl,--no-as-needed" if RUBY_PLATFORM !~ /aarch64|arm64/ + $LDFLAGS << " -Wl,--no-as-needed" if RUBY_PLATFORM !~ /aarch64|arm64|darwin/ # Find libpq in the ports directory coming from lib/3.x # It is shared between all compiled ruby versions. 
$LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" if RUBY_PLATFORM =~ /linux/ From d4ba029bd5212c058b291e87246217358d087b53 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 13 Jul 2025 14:40:14 +0200 Subject: [PATCH 075/118] Use rpath on Macos to find libpq-ruby-pg.1.dylib --- ext/extconf.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/extconf.rb b/ext/extconf.rb index 6c9aea0dc..7e82f5625 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -150,7 +150,7 @@ def install recipe.host = toolchain recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux|darwin/}" - recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,@rpath/#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ recipe.configure_options << "CPPFLAGS=-I#{openssl_recipe.path}/include" From 9f90c0562ebc458f9abb8614f2756f5bbeb756ea Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 13 Jul 2025 14:57:14 +0200 Subject: [PATCH 076/118] Macos: Load libpq-ruby-pg.1.dylib directly from ports directory .. instead of using an rpath. 
--- ext/extconf.rb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ext/extconf.rb b/ext/extconf.rb index 7e82f5625..e76d6508d 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -150,7 +150,7 @@ def install recipe.host = toolchain recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux|darwin/}" - recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,@rpath/#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,@loader_path/../../ports/#{gem_platform}/lib/#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ recipe.configure_options << "CPPFLAGS=-I#{openssl_recipe.path}/include" @@ -167,7 +167,6 @@ def install # Find libpq in the ports directory coming from lib/3.x # It is shared between all compiled ruby versions. 
$LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" if RUBY_PLATFORM =~ /linux/ - $LDFLAGS << " '-Wl,-rpath,@loader_path/../../ports/#{gem_platform}/lib'" if RUBY_PLATFORM =~ /darwin/ # Don't use pg_config for cross build, but --with-pg-* path options dir_config('pg', "#{postgresql_recipe.path}/include", "#{postgresql_recipe.path}/lib") From e04e5d015ce03b4fac9168391ea4cf1b774384d9 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 13 Jul 2025 15:29:32 +0200 Subject: [PATCH 077/118] Add comment about loading of libpq in binary gems --- ext/extconf.rb | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ext/extconf.rb b/ext/extconf.rb index e76d6508d..a75ff8ae9 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -121,9 +121,18 @@ def configure end end + # We build a libpq library file which static links OpenSSL and krb5. + # Our builtin libpq is referenced in different ways depending on the OS: + # - Windows: Add the ports directory at runtime per RubyInstaller::Runtime.add_dll_directory + # The file is called "libpq.dll" + # - Linux: Add a rpath to pg_ext.so which references the ports directory. + # The file is called "libpq-ruby-pg.so.1" to avoid loading of system libpq by accident. + # - Macos: Add a reference with relative path in pg_ext.so to the ports directory. + # The file is called "libpq-ruby-pg.1.dylib" to avoid loading of other libpq by accident.
libpq_orig, libpq_rubypg = case RUBY_PLATFORM when /linux/ then ["libpq.so.5", "libpq-ruby-pg.so.1"] when /darwin/ then ["libpq.5.dylib", "libpq-ruby-pg.1.dylib"] + # when /mingw/ then ["libpq.dll", "libpq.dll"] # renaming not needed end postgresql_recipe = BuildRecipe.new("postgresql", POSTGRESQL_VERSION, [POSTGRESQL_SOURCE_URI]).tap do |recipe| From cad06bf2cfc96f32ea5b2306e9a630a18e58e417 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 14 Jul 2025 13:32:17 +0200 Subject: [PATCH 078/118] Update to OpenSSL-3.5.1 and PostgreSQL-17.5 for binary gems --- ext/extconf.rb | 4 ++-- .../patches/openssl/{3.4.0 => 3.5.1}/0001-aarch64-mingw.patch | 0 ...Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch | 0 ...libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch | 0 4 files changed, 2 insertions(+), 2 deletions(-) rename ports/patches/openssl/{3.4.0 => 3.5.1}/0001-aarch64-mingw.patch (100%) rename ports/patches/postgresql/{17.2 => 17.5}/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch (100%) rename ports/patches/postgresql/{17.2 => 17.5}/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch (100%) diff --git a/ext/extconf.rb b/ext/extconf.rb index a75ff8ae9..2878baef2 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -27,13 +27,13 @@ gem 'mini_portile2', '~>2.1' require 'mini_portile2' - OPENSSL_VERSION = ENV['OPENSSL_VERSION'] || '3.4.0' + OPENSSL_VERSION = ENV['OPENSSL_VERSION'] || '3.5.1' OPENSSL_SOURCE_URI = "http://www.openssl.org/source/openssl-#{OPENSSL_VERSION}.tar.gz" KRB5_VERSION = ENV['KRB5_VERSION'] || '1.21.3' KRB5_SOURCE_URI = "http://kerberos.org/dist/krb5/#{KRB5_VERSION[/^(\d+\.\d+)/]}/krb5-#{KRB5_VERSION}.tar.gz" - POSTGRESQL_VERSION = ENV['POSTGRESQL_VERSION'] || '17.2' + POSTGRESQL_VERSION = ENV['POSTGRESQL_VERSION'] || '17.5' POSTGRESQL_SOURCE_URI = "http://ftp.postgresql.org/pub/source/v#{POSTGRESQL_VERSION}/postgresql-#{POSTGRESQL_VERSION}.tar.bz2" class BuildRecipe < MiniPortile diff --git 
a/ports/patches/openssl/3.4.0/0001-aarch64-mingw.patch b/ports/patches/openssl/3.5.1/0001-aarch64-mingw.patch similarity index 100% rename from ports/patches/openssl/3.4.0/0001-aarch64-mingw.patch rename to ports/patches/openssl/3.5.1/0001-aarch64-mingw.patch diff --git a/ports/patches/postgresql/17.2/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch b/ports/patches/postgresql/17.5/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch similarity index 100% rename from ports/patches/postgresql/17.2/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch rename to ports/patches/postgresql/17.5/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch diff --git a/ports/patches/postgresql/17.2/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch b/ports/patches/postgresql/17.5/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch similarity index 100% rename from ports/patches/postgresql/17.2/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch rename to ports/patches/postgresql/17.5/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch From 325e22bbbf7ba92c3d9e1a77cebf876e624e6e20 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 15 Jul 2025 21:04:34 +0200 Subject: [PATCH 079/118] Add some documentation --- README.md | 1 + ext/pg_type_map_by_oid.c | 2 ++ lib/pg/connection.rb | 33 +++++++++++++++++++++++++++------ 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 4869922fa..3841eaf3d 100644 --- a/README.md +++ b/README.md @@ -175,6 +175,7 @@ The following type maps are prefilled with type mappings from the PG::BasicTypeR * PG::BasicTypeMapBasedOnResult - a PG::TypeMapByOid prefilled with encoders for common PostgreSQL column types * PG::BasicTypeMapForQueries - a PG::TypeMapByClass prefilled with encoders for common Ruby value classes +Several type maps can be chained by setting PG::TypeMap::DefaultTypeMappable#default_type_map . 
## Thread support diff --git a/ext/pg_type_map_by_oid.c b/ext/pg_type_map_by_oid.c index 6439684ef..bbdc4cb00 100644 --- a/ext/pg_type_map_by_oid.c +++ b/ext/pg_type_map_by_oid.c @@ -315,6 +315,8 @@ pg_tmbo_coders( VALUE self ) * The type map will do Hash lookups for each result value, if the number of rows * is below or equal +number+. * + * Default is 10. + * */ static VALUE pg_tmbo_max_rows_for_online_lookup_set( VALUE self, VALUE value ) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 7d10b95e9..264906f83 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -673,7 +673,16 @@ def cancel alias async_cancel cancel module Pollable - # Track the progress of the connection, waiting for the socket to become readable/writable before polling it + # Track the progress of the connection, waiting for the socket to become readable/writable before polling it. + # + # Connecting to multiple hosts is done like so: + # - All hosts are passed to PG::Connection.connect_start + # - As soon as the host is tried to connect the related host is removed from the hosts list + # - When the polling status changes to `PG::PGRES_POLLING_OK` the connection is returned and ready to use. + # - When the polling status changes to `PG::PGRES_POLLING_FAILED` connecting is aborted and a PG::ConnectionBad is raised with details to all connection attempts. + # - When a timeout occurs, connecting is restarted with the remaining hosts. + # + # The downside is that this connects only once to hosts which are listed twice when they timeout. private def polling_loop(poll_meth) connect_timeout = conninfo_hash[:connect_timeout] if (timeo = connect_timeout.to_i) && timeo > 0 @@ -799,7 +808,7 @@ class << self # PG::Connection.new(connection_string) -> conn # PG::Connection.new(host, port, options, tty, dbname, user, password) -> conn # - # Create a connection to the specified server. + # === Create a connection to the specified server.
# # +connection_hash+ must be a ruby Hash with connection parameters. # See the {list of valid parameters}[https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS] in the PostgreSQL documentation. @@ -823,7 +832,13 @@ class << self # [+password+] # login password # - # Examples: + # + # If the Ruby default internal encoding is set (i.e., Encoding.default_internal != nil), the + # connection will have its +client_encoding+ set accordingly. + # + # Raises a PG::Error if the connection fails. + # + # === Examples: # # # Connect using all defaults # PG::Connection.new @@ -840,10 +855,16 @@ class << self # # As an URI # PG::Connection.new( "postgresql://user:pass@pgsql.example.com:5432/testdb?sslmode=require" ) # - # If the Ruby default internal encoding is set (i.e., Encoding.default_internal != nil), the - # connection will have its +client_encoding+ set accordingly. + # === Specifying Multiple Hosts + # + # It is possible to specify multiple hosts to connect to, so that they are tried in the given order or optionally in random order. + # In the Keyword/Value format, the host, hostaddr, and port options accept comma-separated lists of values. + # The {details to libpq}[https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS] describe how it works, but there are two small differences how ruby-pg handles multiple hosts: + # - All hosts are resolved before the first connection is tried. + # This means that when +load_balance_hosts+ is set to +random+, then all resolved addresses are tried randomly in one level. + # When a host resolves to more than one address, it is therefore tried more often than a host that has only one address. + # - When a timeout occurs due to the value of +connect_timeout+, then the given +host+, +hostaddr+ and +port+ combination is not tried a second time, even if it is specified several times. # - # Raises a PG::Error if the connection fails. 
def new(*args) conn = connect_to_hosts(*args) From 0605e91fbf94e9301b64b49440e964dd84ebc03f Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 15 Jul 2025 21:06:43 +0200 Subject: [PATCH 080/118] Bump VERSION to 1.6.0.rc2 --- lib/pg/version.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/version.rb b/lib/pg/version.rb index 9accb0791..cd5cf83c7 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,4 +1,4 @@ module PG # Library version - VERSION = '1.6.0.rc1' + VERSION = '1.6.0.rc2' end From 977a86194e362b306fb9546c46afe374dcabb512 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 15 Jul 2025 21:12:21 +0200 Subject: [PATCH 081/118] Add binary gem platform aarch64-linux --- .github/workflows/binary-gems.yml | 4 ++++ Rakefile | 1 + ext/extconf.rb | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 68cb0a993..b15849b4e 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -28,6 +28,7 @@ jobs: - platform: "x64-mingw32" - platform: "x86-mingw32" - platform: "x86_64-linux" + - platform: "aarch64-linux" - platform: "x86_64-darwin" - platform: "arm64-darwin" steps: @@ -75,6 +76,9 @@ jobs: - os: ubuntu-latest ruby: "3.2" platform: "x86_64-linux" + - os: ubuntu-24.04-arm + ruby: "3.4" + platform: "aarch64-linux" - os: macos-latest ruby: "3.4" platform: "arm64-darwin" diff --git a/Rakefile b/Rakefile index 9df31c286..57a17c7eb 100644 --- a/Rakefile +++ b/Rakefile @@ -49,6 +49,7 @@ CrossLibraries = [ ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], ['x86_64-linux', 'linux-x86_64', 'x86_64-linux-gnu'], + ['aarch64-linux', 'linux-aarch64', 'aarch64-linux-gnu'], ['x86_64-darwin', 'darwin64-x86_64', 'x86_64-apple-darwin'], ['arm64-darwin', 'darwin64-arm64', 'arm64-apple-darwin'], ].map do |platform, openssl_config, toolchain| diff --git a/ext/extconf.rb b/ext/extconf.rb 
index 2878baef2..f307593e0 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -159,7 +159,7 @@ def install recipe.host = toolchain recipe.configure_options << "CFLAGS=#{" -fPIC" if RUBY_PLATFORM =~ /linux|darwin/}" - recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,@loader_path/../../ports/#{gem_platform}/lib/#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" + recipe.configure_options << "LDFLAGS=-L#{openssl_recipe.path}/lib -L#{openssl_recipe.path}/lib64 -L#{openssl_recipe.path}/lib-arm64 #{"-Wl,-soname,#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -ldl" if RUBY_PLATFORM =~ /linux/} #{"-Wl,-install_name,@loader_path/../../ports/#{gem_platform}/lib/#{libpq_rubypg} -lgssapi_krb5 -lkrb5 -lk5crypto -lkrb5support -lresolv -framework Kerberos" if RUBY_PLATFORM =~ /darwin/}" recipe.configure_options << "LIBS=-lkrb5 -lcom_err -lk5crypto -lkrb5support -lresolv" if RUBY_PLATFORM =~ /linux/ recipe.configure_options << "LIBS=-lssl -lwsock32 -lgdi32 -lws2_32 -lcrypt32" if RUBY_PLATFORM =~ /mingw|mswin/ recipe.configure_options << "CPPFLAGS=-I#{openssl_recipe.path}/include" From d4a386e7007b387a6658d7c5d73da4da416e1a23 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Wed, 16 Jul 2025 06:45:10 +0200 Subject: [PATCH 082/118] Add missing require "fileutils" --- spec/helpers.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/spec/helpers.rb b/spec/helpers.rb index 36643748b..b8252d0da 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -5,6 +5,7 @@ require 'shellwords' require 'pg' require 'openssl' +require 'fileutils' require 'objspace' require_relative 'helpers/scheduler.rb' require_relative 'helpers/tcp_gate_scheduler.rb' From 4d806962a233df888d3411a28103651513c77c01 Mon Sep 17 
00:00:00 2001 From: Lars Kanis Date: Wed, 16 Jul 2025 06:47:29 +0200 Subject: [PATCH 083/118] Add CHANGELOG entry for pg-1.6.0.rc2 --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5dd3dff09..dfcd90b38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,31 @@ +## v1.6.0.rc2 [2025-07-14] Lars Kanis + +Added: + +- Add binary gems for Ruby 3.4. +- Add fat binary gem for platform `aarch64-mingw-ucrt` aka Windows on ARM [#626](https://github.com/ged/ruby-pg/pull/626), for platform Macos on Intel and ARM [#643](https://github.com/ged/ruby-pg/pull/643) and for platform `aarch64-linux` [#646](https://github.com/ged/ruby-pg/pull/646). +- Update fat binary gem to OpenSSL-3.5.1 and PostgreSQL-17.5. +- Add a patch to libpq to avoid starvation on bigger SSL records, which some database engines other than vanilla PostgreSQL use. + This patch applies to platform specific binary gems only. + [#616](https://github.com/ged/ruby-pg/pull/616) +- Fix missing array input verification in PG::TypeMapByColumn. + This could cause a segfault. + [#620](https://github.com/ged/ruby-pg/pull/620) +- Add possibility to define the number of array dimensions to be encoded. + Setting dimensions is especially useful, when a Record shall be encoded into an Array, since the Array encoder can not distinguish if the array shall be encoded as a higher dimension or as a record otherwise. + [#622](https://github.com/ged/ruby-pg/pull/622) +- Add MINGW package dependency which is resolved by RubyInstaller. + [#617](https://github.com/ged/ruby-pg/pull/617) +- Change `conn.server_version` and `conn.protocol_version` to raise instead of return 0 on error. + [#632](https://github.com/ged/ruby-pg/pull/632) +- Fix making PG::BasicTypeMapForQueries shareable for Ractor in ruby-3.5. + [#636](https://github.com/ged/ruby-pg/pull/636) +- Rename `History.md` to `CHANGELOG.md`, which is more common. 
+ [#642](https://github.com/ged/ruby-pg/pull/642) +- Fix connecting to multiple hosts after `connect_timeout`. + [#637](https://github.com/ged/ruby-pg/pull/637) + + ## v1.6.0.rc1 [2024-11-28] Lars Kanis Added: From 38250ff2d81d2c4c25fc9dc8ea0778154f215fb7 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Wed, 16 Jul 2025 17:23:58 +0200 Subject: [PATCH 084/118] Update release date in CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfcd90b38..a53dbb893 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.6.0.rc2 [2025-07-14] Lars Kanis +## v1.6.0.rc2 [2025-07-16] Lars Kanis Added: From 7f0b6cc7ecf216ea462f125ac79fac69879297b5 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Wed, 16 Jul 2025 17:25:49 +0200 Subject: [PATCH 085/118] More release automation Set annotated git tags with the related changelog entry. Upload all release files as part of "rake release". --- Rakefile | 6 +++- rakelib/pg_gem_helper.rb | 64 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 rakelib/pg_gem_helper.rb diff --git a/Rakefile b/Rakefile index 57a17c7eb..823e36efc 100644 --- a/Rakefile +++ b/Rakefile @@ -11,6 +11,7 @@ require 'rake/clean' require 'rspec/core/rake_task' require 'bundler' require 'bundler/gem_helper' +require_relative "rakelib/pg_gem_helper" # Build directory constants BASEDIR = Pathname( __FILE__ ).dirname @@ -34,7 +35,7 @@ CLEAN.include "lib/pg/postgresql_lib_path.rb" CLEAN.include "ports/*.installed" CLEAN.include "ports/*mingw*", "ports/*linux*", "ports/*darwin*" -Bundler::GemHelper.install_tasks +PgGemHelper.install_tasks $gem_spec = Bundler.load_gemspec(GEMSPEC) desc "Turn on warnings and debugging in the build."
@@ -56,6 +57,9 @@ CrossLibraries = [ CrossLibrary.new platform, openssl_config, toolchain end +# Register binary gems to be pushed to rubygems.org +Bundler::GemHelper.instance.cross_platforms = CrossLibraries.map(&:platform) + # Rake-compiler task Rake::ExtensionTask.new do |ext| ext.name = 'pg_ext' diff --git a/rakelib/pg_gem_helper.rb b/rakelib/pg_gem_helper.rb new file mode 100644 index 000000000..495f7022e --- /dev/null +++ b/rakelib/pg_gem_helper.rb @@ -0,0 +1,64 @@ +require 'bundler' +require 'bundler/gem_helper' + +class PgGemHelper < Bundler::GemHelper + attr_accessor :cross_platforms + + def install + super + + task "release:guard_clean" => ["release:update_history"] + + task "release:update_history" do + update_history + end + + task "release:rubygem_push" => ["gem:native"] + end + + def hfile + "CHANGELOG.md" + end + + def headline + '^([^\n]*)(\d+\.\d+\.\d+(?:\.\w+)?)([^\w]+)([2Y][0Y][0-9Y][0-9Y]-[0-1M][0-9M]-[0-3D][0-9D])([^\w]*|$)' + end + + def reldate + Time.now.strftime("%Y-%m-%d") + end + + def update_history + hin = File.read(hfile) + hout = hin.sub(/#{headline}/) do + raise "#{hfile} isn't up-to-date for version #{version} (!= #{$2})" unless $2==version.to_s + $1 + $2 + $3 + reldate + $5 + end + if hout != hin + Bundler.ui.confirm "Updating #{hfile} for release." + File.write(hfile, hout) + Rake::FileUtilsExt.sh "git", "commit", hfile, "-m", "Update release date in #{hfile}" + end + end + + def tag_version + Bundler.ui.confirm "Tag release with annotation:" + m = File.read(hfile).match(/(?#{headline}.*?)#{headline}/m) || raise("Unable to find release notes in #{hfile}") + Bundler.ui.info(m[:annotation].gsub(/^/, " ")) + IO.popen(["git", "tag", "--file=-", version_tag], "w") do |fd| + fd.write m[:annotation] + end + yield if block_given? + rescue + Bundler.ui.error "Untagging #{version_tag} due to error." 
+ Rake::FileUtilsExt.sh "git", "tag", "-d", version_tag + raise + end + + def rubygem_push(path) + cross_platforms.each do |ruby_platform| + super(path.gsub(/\.gem\z/, "-#{ruby_platform}.gem")) + end + super(path) + end +end From 0703db4f8897010016d0b9555c59d763f42f3b52 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 26 Jul 2025 14:05:46 +0200 Subject: [PATCH 086/118] README: Try to make the `connect_timeout` description more clear ... in conjunction with multiple servers. --- lib/pg/connection.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 264906f83..860752e09 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -863,7 +863,9 @@ class << self # - All hosts are resolved before the first connection is tried. # This means that when +load_balance_hosts+ is set to +random+, then all resolved addresses are tried randomly in one level. # When a host resolves to more than one address, it is therefore tried more often than a host that has only one address. - # - When a timeout occurs due to the value of +connect_timeout+, then the given +host+, +hostaddr+ and +port+ combination is not tried a second time, even if it is specified several times. + # - When a timeout occurs due to the value of +connect_timeout+, then the given +host+, +hostaddr+ and +port+ combination is not tried a second time, even if it's specified several times. + # It's still possible to do load balancing with +load_balance_hosts+ set to +random+ and to increase the number of connections a node gets, when the hostname is provided multiple times in the host string. + # This is because in non-timeout cases the host is tried multiple times. 
# def new(*args) conn = connect_to_hosts(*args) From 9ef29bd24b0f56803521b9fa46de728fe15fc92e Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 27 Jul 2025 16:33:22 +0200 Subject: [PATCH 087/118] Fix connecting without any host or hostaddr --- lib/pg/connection.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/connection.rb b/lib/pg/connection.rb index 860752e09..238900070 100644 --- a/lib/pg/connection.rb +++ b/lib/pg/connection.rb @@ -763,7 +763,7 @@ module Pollable ihosts = iopts[:host]&.split(",", -1) ihostaddrs = iopts[:hostaddr]&.split(",", -1) iports = iopts[:port]&.split(",", -1) - iports = iports * (ihosts || ihostaddrs).size if iports&.size == 1 + iports = iports * (ihosts || ihostaddrs || [1]).size if iports&.size == 1 idx = (ihosts || ihostaddrs || iports).index.with_index do |_, i| (ihosts ? ihosts[i] == host : true) && From e129350142d3ab5f2ad0ad4e3031e7f87857aa32 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 27 Jul 2025 17:33:07 +0200 Subject: [PATCH 088/118] Remove installation of Macos "mig" tool It was added to rake-compiler-dock in https://github.com/rake-compiler/rake-compiler-dock/pull/155 --- Rakefile | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/Rakefile b/Rakefile index 823e36efc..3d72b56ba 100644 --- a/Rakefile +++ b/Rakefile @@ -104,19 +104,6 @@ task 'gem:native:prepare' do end end -task 'install_darwin_mig', [:arch] do |t, args| - sh <<~EOT - rm -rf bootstrap_cmds && - git clone --branch=cross_platform https://github.com/markmentovai/bootstrap_cmds && - cd bootstrap_cmds && - autoreconf --install && - sh configure && - make && - sed -E -i 's/^cppflags=(.*)/cppflags=(\\1 "-D#{args[:arch]}" "-I\\/opt\\/osxcross\\/target\\/SDK\\/MacOSX11.1.sdk\\/usr\\/include")/' migcom.tproj/mig.sh && - sudo make install - EOT -end - CrossLibraries.each do |xlib| platform = xlib.platform desc "Build fat binary gem for platform #{platform}" @@ -128,8 +115,6 @@ CrossLibraries.each do |xlib| sudo apt-get 
update && sudo apt-get install -y bison flex && (cp build/gem/gem-*.pem ~/.gem/ || true) && bundle install --local && - #{ "rake install_darwin_mig[__arm64__]" if platform =~ /arm64-darwin/ } - #{ "rake install_darwin_mig[__x86_64__]" if platform =~ /x86_64-darwin/ } rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=#{RakeCompilerDock.ruby_cc_version("~>2.7", "~>3.0")} EOT end From f37be3dfc7047f9b45d07ff6311d10b5f594817b Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 27 Jul 2025 19:28:16 +0200 Subject: [PATCH 089/118] Bump VERSION to 1.6.0 --- lib/pg/version.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/version.rb b/lib/pg/version.rb index cd5cf83c7..8919e65f4 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,4 +1,4 @@ module PG # Library version - VERSION = '1.6.0.rc2' + VERSION = '1.6.0' end From 99a815cef9699a2263577b18ecc98e244b5106ef Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 27 Jul 2025 19:35:40 +0200 Subject: [PATCH 090/118] CHANGELOG: Merge 1.6.0.rc1 and rc2 to final release note --- CHANGELOG.md | 46 +++++++++++++++++++--------------------------- 1 file changed, 19 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a53dbb893..432844490 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,49 +1,41 @@ -## v1.6.0.rc2 [2025-07-16] Lars Kanis +## v1.6.0 [2025-07-27] Lars Kanis Added: - Add binary gems for Ruby 3.4. -- Add fat binary gem for platform `aarch64-mingw-ucrt` aka Windows on ARM [#626](https://github.com/ged/ruby-pg/pull/626), for platform Macos on Intel and ARM [#643](https://github.com/ged/ruby-pg/pull/643) and for platform `aarch64-linux` [#646](https://github.com/ged/ruby-pg/pull/646). 
+- Add fat binary gem for platform `aarch64-mingw-ucrt` aka Windows on ARM [#626](https://github.com/ged/ruby-pg/pull/626), for platform Macos on Intel and ARM [#643](https://github.com/ged/ruby-pg/pull/643), for platform `aarch64-linux` [#646](https://github.com/ged/ruby-pg/pull/646) and for platform `x86_64-linux` [#551](https://github.com/ged/ruby-pg/pull/551#issuecomment-2504715762). - Update fat binary gem to OpenSSL-3.5.1 and PostgreSQL-17.5. - Add a patch to libpq to avoid starvation on bigger SSL records, which some database engines other than vanilla PostgreSQL use. This patch applies to platform specific binary gems only. [#616](https://github.com/ged/ruby-pg/pull/616) -- Fix missing array input verification in PG::TypeMapByColumn. - This could cause a segfault. - [#620](https://github.com/ged/ruby-pg/pull/620) +- Add support for new query cancel functions of PostgreSQL-17. + This adds the new class `PG::CancelConnection` which provides the ability to cancel a query per blocking or per non-blocking functions. + If the new functions are available they are used and the older are no longer compiled in. + This way we can get rid of reading out the internal `PGcancel` struct by `Connection#backend_key`. + [#614](https://github.com/ged/ruby-pg/pull/614) +- Add PG::BinaryDecoder::Array and PG::BinaryEncoder::Array to parse and encode PostgreSQL arrays in binary format. + [#603](https://github.com/ged/ruby-pg/pull/603) - Add possibility to define the number of array dimensions to be encoded. Setting dimensions is especially useful, when a Record shall be encoded into an Array, since the Array encoder can not distinguish if the array shall be encoded as a higher dimension or as a record otherwise. 
[#622](https://github.com/ged/ruby-pg/pull/622) +- Add Connection#set_chunked_rows_mode [#610](https://github.com/ged/ruby-pg/pull/610) +- Add PG::Connection#close_prepared, PG::Connection#close_portal, PG::Connection#send_close_prepared and PG::Connection#send_close_portal which are new in PostgreSQL-17. + [#611](https://github.com/ged/ruby-pg/pull/611) +- Add Connection#send_pipeline_sync, async_pipeline_sync and release GVL at PQ(sendP|P)ipelineSync. + [#612](https://github.com/ged/ruby-pg/pull/612) - Add MINGW package dependency which is resolved by RubyInstaller. [#617](https://github.com/ged/ruby-pg/pull/617) - Change `conn.server_version` and `conn.protocol_version` to raise instead of return 0 on error. [#632](https://github.com/ged/ruby-pg/pull/632) +- Fix connecting to multiple hosts after `connnect_timeout`. + [#637](https://github.com/ged/ruby-pg/pull/637) - Fix making PG::BasicTypeMapForQueries shareable for Ractor in ruby-3.5. [#636](https://github.com/ged/ruby-pg/pull/636) +- Fix missing array input verification in PG::TypeMapByColumn. + This could cause a segfault. + [#620](https://github.com/ged/ruby-pg/pull/620) - Rename `History.md` to `CHANGELOG.md`, which is more common. [#642](https://github.com/ged/ruby-pg/pull/642) -- Fix connecting to multiple hosts after `connnect_timeout`. - [#637](https://github.com/ged/ruby-pg/pull/637) - - -## v1.6.0.rc1 [2024-11-28] Lars Kanis - -Added: - -- Add fat binary gem for platform `x86_64-linux`. - [#551](https://github.com/ged/ruby-pg/pull/551#issuecomment-2504715762) -- Add PG::BinaryDecoder::Array and PG::BinaryEncoder::Array to parse and encode PostgreSQL arrays in binary format. - [#603](https://github.com/ged/ruby-pg/pull/603) -- Add support for new query cancel functions of PostgreSQL-17. - This adds the new class `PG::CancelConnection` which provides the ability to cancel a query per blocking or per non-blocking functions. 
- If the new functions are available they are used and the older are no longer compiled in. - This way we can get rid of reading out the internal `PGcancel` struct by `Connection#backend_key`. - [#614](https://github.com/ged/ruby-pg/pull/614) -- Add Connection#set_chunked_rows_mode [#610](https://github.com/ged/ruby-pg/pull/610) -- Add PG::Connection#close_prepared, PG::Connection#close_portal, PG::Connection#send_close_prepared and PG::Connection#send_close_portal which are new in PostgreSQL-17. - [#611](https://github.com/ged/ruby-pg/pull/611) -- Add Connection#send_pipeline_sync, async_pipeline_sync and release GVL at PQ(sendP|P)ipelineSync. - [#612](https://github.com/ged/ruby-pg/pull/612) Removed: From b77e86c478c294fd74ed187b7573df2665903209 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 29 Jul 2025 12:43:12 +0200 Subject: [PATCH 091/118] README: Update install description reg. binary vs. source gem --- README.md | 66 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 53 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 3841eaf3d..6394e588d 100644 --- a/README.md +++ b/README.md @@ -40,29 +40,34 @@ A small example usage: ## Requirements * Ruby 2.7 or newer -* PostgreSQL 10.x or later (with headers, -dev packages, etc). +* PostgreSQL 10.x or later +* When installing the source gem: libpq with headers, -dev packages, etc. -It usually works with earlier versions of Ruby/PostgreSQL as well, but those are -not regularly tested. +## How To Install -## Versioning +Install via RubyGems: -We tag and release gems according to the [Semantic Versioning](http://semver.org/) principle. + gem install pg -As a result of this policy, you can (and should) specify a dependency on this gem using the [Pessimistic Version Constraint](http://guides.rubygems.org/patterns/#pessimistic-version-constraint) with two digits of precision. +This installs the binary gem, specific to the running platform by default. 
-For example: +### Binary gem -```ruby - spec.add_dependency 'pg', '~> 1.0' -``` +The binary gems don't depend on the libpq package on the running system. +They have libpq builtin. -## How To Install +The gems for platform `x86_64-linux` and `aarch64-linux` run on Alpine Linux, but require the package `gcompat` there as long as we don't provide a native gem for platform `x86_64-linux-musl`. Install this package like so: -Install via RubyGems: + apk add gcompat - gem install pg +There is one use case the binary gems don't support: Retrieval of connection [options from LDAP](https://www.postgresql.org/docs/current/libpq-ldap.html). To support this `libldap` would be necessary, but it has a lot of dependencies. It doesn't seem to be a widely used feature and that it's worth to support it. If it's necessary, the source gem can be forced. + +### Source gem + +The source gem can be forced by: + + gem install pg --platform ruby You may need to specify the path to the 'pg_config' program installed with Postgres: @@ -73,6 +78,28 @@ If you're installing via Bundler, you can provide compile hints like so: bundle config build.pg --with-pg-config= +### Bundler + +To make sure, the necessary platforms and the source gem are fetched by bundler, they can be added like so + +``` +bundle lock --add-platform x86_64-linux +bundle lock --add-platform arm64-darwin +bundle lock --add-platform x64-mingw-ucrt +bundle lock --add-platform ruby +bundle package --all-platforms +``` + +A re-run of `bundle package` is also necessary after `bundle update`, in order to retrieve the new specific gems of all platforms. + +If the binary gems don't work for some reason, it's easy to force the usage of the source gem in the Gemfile: + +``` +gem "pg", force_ruby_platform: true +``` + +### More + See README-OS_X.rdoc for more information about installing under MacOS X, and README-Windows.rdoc for Windows build/installation instructions. 
@@ -85,6 +112,19 @@ can be found in [the `certs` directory](https://github.com/ged/ruby-pg/tree/mast of the repository. +## Versioning + +We tag and release gems according to the [Semantic Versioning](http://semver.org/) principle. + +As a result of this policy, you can (and should) specify a dependency on this gem using the [Pessimistic Version Constraint](http://guides.rubygems.org/patterns/#pessimistic-version-constraint) with two digits of precision. + +For example: + +```ruby + spec.add_dependency 'pg', '~> 1.0' +``` + + ## Type Casts Pg can optionally type cast result values and query parameters in Ruby or From d425e208939bcfaa2b3e2921e9d4feca3da991ca Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 31 Jul 2025 13:15:16 +0200 Subject: [PATCH 092/118] Add comments to Coder#delimiter and #needs_quotation reg. binary coders --- ext/pg_coder.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ext/pg_coder.c b/ext/pg_coder.c index f6c252fe9..972e6f2c4 100644 --- a/ext/pg_coder.c +++ b/ext/pg_coder.c @@ -364,6 +364,7 @@ pg_coder_flags_get(VALUE self) * Specifies whether the assigned #elements_type requires quotation marks to * be transferred safely. Encoding with #needs_quotation=false is somewhat * faster. + * It is only used by text coders and ignored by binary coders. * * The default is +true+. This option is ignored for decoding of values. */ @@ -397,6 +398,7 @@ pg_coder_needs_quotation_get(VALUE self) * Specifies the character that separates values within the composite type. * The default is a comma. * This must be a single one-byte character. + * It is only used by text coders and ignored by binary coders. 
*/ static VALUE pg_coder_delimiter_set(VALUE self, VALUE delimiter) From 887ebeed65a2bd376fb658f98c6f0b494366f70c Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 31 Jul 2025 13:17:20 +0200 Subject: [PATCH 093/118] Serialize CompositeCoder#dimensions only when set This fixes the compatibility to pg-1.5.9, when deserializing Marshal data from pg-1.6, as long as the new attribute isn't used. Fixes #652 --- lib/pg/coder.rb | 5 +++-- spec/pg/type_spec.rb | 11 +++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/pg/coder.rb b/lib/pg/coder.rb index 7bf2c99e8..a9db747f0 100644 --- a/lib/pg/coder.rb +++ b/lib/pg/coder.rb @@ -72,12 +72,13 @@ def inspect_short class CompositeCoder < Coder def to_h - { **super, + h = { **super, elements_type: elements_type, needs_quotation: needs_quotation?, delimiter: delimiter, - dimensions: dimensions, } + h[:dimensions] = dimensions if dimensions # Write only when set, for Marshal compat with pg<1.6 + h end def inspect diff --git a/spec/pg/type_spec.rb b/spec/pg/type_spec.rb index 039cd6c02..ad5e5f9b0 100644 --- a/spec/pg/type_spec.rb +++ b/spec/pg/type_spec.rb @@ -1184,8 +1184,15 @@ def expect_deprecated_coder_init it "should respond to to_h" do expect( textenc_int_array.to_h ).to eq( { name: nil, oid: 0, format: 0, flags: 0, - elements_type: textenc_int, needs_quotation: false, delimiter: ',', - dimensions: nil + elements_type: textenc_int, needs_quotation: false, delimiter: ',' + } ) + end + + it "should respond to to_h with dimensions set" do + enc_array = PG::BinaryEncoder::Array.new dimensions: 1 + expect( enc_array.to_h ).to eq( { + name: nil, oid: 0, format: 1, flags: 0, dimensions: 1, + elements_type: nil, needs_quotation: true, delimiter: ',' } ) end From 1f13006866b9f48445ce8cf3ab8859bcf7e7a1ac Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 1 Aug 2025 10:56:50 +0200 Subject: [PATCH 094/118] Remove dependencies which aren't needed for binary naive gems --- Rakefile | 3 +++ 1 file changed, 3 
insertions(+) diff --git a/Rakefile b/Rakefile index 823e36efc..58dd0b0d0 100644 --- a/Rakefile +++ b/Rakefile @@ -85,6 +85,9 @@ Rake::ExtensionTask.new do |ext| spec.files << "ports/#{spec.platform.to_s}/lib/libpq-ruby-pg.so.1" if spec.platform.to_s =~ /linux/ spec.files << "ports/#{spec.platform.to_s}/lib/libpq-ruby-pg.1.dylib" if spec.platform.to_s =~ /darwin/ spec.files << "ports/#{spec.platform.to_s}/lib/libpq.dll" if spec.platform.to_s =~ /mingw|mswin/ + + # Binary gems don't need postgresql header+lib files + spec.metadata.delete("msys2_mingw_dependencies") end end From fcf68c8da1e4aefa876dc1edff36fe20aee1ecff Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 29 Jul 2025 12:48:11 +0200 Subject: [PATCH 095/118] CI: Remove ruby < 3.2 from test matrix on Windows Ruby < 3.2 is not compatible to gcc-15 and patches will not be backported since it's EOL now. Unfortunately MSYS2 is a rolling release with no option for an older gcc. Although setup-ruby installs an older gcc-14.2 package by default, it fails to install further packages per pacman due to dependencies: ```sh $ pacman.exe -Sy --noconfirm --noprogressbar --needed --disable-download-timeout mingw-w64-x86_64-postgresql resolving dependencies... looking for conflicting packages... error: failed to prepare transaction (could not satisfy dependencies) :: installing mingw-w64-x86_64-gcc-libs (15.1.0-8) breaks dependency 'mingw-w64-x86_64-gcc-libs=14.2.0-3' required by mingw-w64-x86_64-gcc ``` Also move "bundle install" before PostgreSQL install, as it disturbs compiling gem extensions somehow.
--- .github/workflows/binary-gems.yml | 16 ++++++++-------- .github/workflows/source-gem.yml | 26 +++++++++++++++++++------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index b15849b4e..86983ee73 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -63,15 +63,15 @@ jobs: matrix: include: - os: windows-latest - ruby: "3.3" + ruby: "3.4" platform: "x64-mingw-ucrt" - os: windows-latest - ruby: "3.1.4-1" + ruby: "3.2.9-1" platform: "x86-mingw32" PGVERSION: 10.20-1-windows - os: windows-latest - ruby: "2.7" - platform: "x64-mingw32" + ruby: "3.2" + platform: "x64-mingw-ucrt" PGVERSION: 16.6-1-windows-x64 - os: ubuntu-latest ruby: "3.2" @@ -116,7 +116,10 @@ jobs: with: name: binary-gem-${{ matrix.platform }} - - name: Download PostgreSQL 32-bit + - run: bundle install + - run: gem install --local pg-*${{ matrix.platform }}.gem --verbose + + - name: Download PostgreSQL if: ${{ matrix.os == 'windows-latest' && matrix.PGVERSION }} run: | Add-Type -AssemblyName System.IO.Compression.FileSystem @@ -135,9 +138,6 @@ jobs: echo "PGPASSWORD=" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - run: echo $env:PATH - - run: gem update --system 3.3.26 - - run: bundle install - - run: gem install --local pg-*${{ matrix.platform }}.gem --verbose - name: Run specs if: ${{ matrix.os != 'windows-latest' }} run: ruby -rpg -S rspec -fd spec/**/*_spec.rb diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index dc42da20d..7b56eee9d 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -47,7 +47,7 @@ jobs: PGVERSION: 17.0-1-windows-x64 PGVER: "17" - os: windows - ruby: "2.7" + ruby: "3.2" PGVERSION: 10.20-1-windows-x64 PGVER: "10" - os: windows @@ -88,6 +88,12 @@ jobs: with: ruby-version: ${{ matrix.ruby }} + - name: Print tool versions + run: | + ruby -v + gem env + gcc -v + - name: Download 
gem from build job uses: actions/download-artifact@v4 with: @@ -98,6 +104,8 @@ jobs: shell: cmd run: ridk exec sh -c "pacman --sync --needed --noconfirm ${MINGW_PACKAGE_PREFIX}-gcc" + - run: bundle install + - name: Download PostgreSQL Windows if: matrix.os == 'windows' run: | @@ -109,7 +117,7 @@ jobs: $(new-object net.webclient).DownloadFile("http://get.enterprisedb.com/postgresql/postgresql-$env:PGVERSION-binaries.zip", "postgresql-binaries.zip") Unzip "postgresql-binaries.zip" "." - echo "$pwd/pgsql/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "$env:RI_DEVKIT$env:MINGW_PREFIX/bin;$env:RI_DEVKIT/usr/bin;$pwd/pgsql/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append echo "PGUSER=$env:USERNAME" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append echo "PGPASSWORD=" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append @@ -131,9 +139,7 @@ jobs: sudo mv /Library/PostgreSQL/$PGVER/pgsql/* /Library/PostgreSQL/$PGVER/ && \ echo /Library/PostgreSQL/$PGVER/bin >> $GITHUB_PATH - - run: gem update --system 3.3.26 - - run: bundle install - + - run: echo $env:PATH - run: gem install --local *.gem --verbose - name: Run specs @@ -143,10 +149,16 @@ jobs: TRUFFLERUBYOPT: --experimental-options --keep-handles-alive run: ruby -rpg -S rspec spec/**/*_spec.rb -cfdoc - - name: Print logs if job failed + - name: Print db logs if job failed if: ${{ failure() && matrix.os == 'windows' }} run: ridk exec cat tmp_test_specs/*.log - - name: Print logs if job failed + - name: Print db logs if job failed if: ${{ failure() && matrix.os != 'windows' }} run: cat tmp_test_specs/*.log + + - name: Print mkmf logs if job failed on Windows-head + if: ${{ failure() && matrix.os == 'windows' && matrix.ruby == 'head' }} + run: | + ridk exec cat c:/rubyinstaller-head-*/lib/ruby/gems/*/extensions/*/*/*/mkmf.log || ridk exec cat d:/rubyinstaller-head-*/lib/ruby/gems/*/extensions/*/*/*/mkmf.log + ridk exec gcc -v From 
95fb237ee1767548e017a40997f670e5ad671e06 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 1 Aug 2025 15:51:25 +0200 Subject: [PATCH 096/118] Revert "Remove installation of Macos "mig" tool" --- Rakefile | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Rakefile b/Rakefile index 716b7973c..58dd0b0d0 100644 --- a/Rakefile +++ b/Rakefile @@ -107,6 +107,19 @@ task 'gem:native:prepare' do end end +task 'install_darwin_mig', [:arch] do |t, args| + sh <<~EOT + rm -rf bootstrap_cmds && + git clone --branch=cross_platform https://github.com/markmentovai/bootstrap_cmds && + cd bootstrap_cmds && + autoreconf --install && + sh configure && + make && + sed -E -i 's/^cppflags=(.*)/cppflags=(\\1 "-D#{args[:arch]}" "-I\\/opt\\/osxcross\\/target\\/SDK\\/MacOSX11.1.sdk\\/usr\\/include")/' migcom.tproj/mig.sh && + sudo make install + EOT +end + CrossLibraries.each do |xlib| platform = xlib.platform desc "Build fat binary gem for platform #{platform}" @@ -118,6 +131,8 @@ CrossLibraries.each do |xlib| sudo apt-get update && sudo apt-get install -y bison flex && (cp build/gem/gem-*.pem ~/.gem/ || true) && bundle install --local && + #{ "rake install_darwin_mig[__arm64__]" if platform =~ /arm64-darwin/ } + #{ "rake install_darwin_mig[__x86_64__]" if platform =~ /x86_64-darwin/ } rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=#{RakeCompilerDock.ruby_cc_version("~>2.7", "~>3.0")} EOT end From 3ce8a71fb8e3e6c8c4c9f1fd8a4a612f5f1750e1 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 1 Aug 2025 18:43:14 +0200 Subject: [PATCH 097/118] Add binary gems for `aarch64-linux-musl` and `x86_64-linux-musl` This requires a workaround for building linux-musl gems. 
Fixes #648 --- .github/workflows/binary-gems.yml | 25 +++++++++++++++++++++---- Rakefile | 10 +++++++++- spec/env/Dockerfile.alpine | 3 ++- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 86983ee73..322cdfc53 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -28,7 +28,9 @@ jobs: - platform: "x64-mingw32" - platform: "x86-mingw32" - platform: "x86_64-linux" + - platform: "x86_64-linux-musl" - platform: "aarch64-linux" + - platform: "aarch64-linux-musl" - platform: "x86_64-darwin" - platform: "arm64-darwin" steps: @@ -156,7 +158,7 @@ jobs: job_binary_multiarch: - name: multiarch (${{matrix.platform}} on ${{matrix.from_image}} ${{matrix.image_platform}}) + name: multiarch (${{matrix.gem_platform}} on ${{matrix.from_image}} ${{matrix.image_platform}}) needs: rcd_build strategy: fail-fast: false @@ -166,12 +168,28 @@ jobs: image_platform: linux/x86_64 gem_platform: x86_64-linux dockerfile: centos + - from_image: alpine + image_platform: linux/x86_64 + gem_platform: x86_64-linux-musl + dockerfile: alpine + - from_image: alpine + image_platform: linux/arm64 + gem_platform: aarch64-linux-musl + dockerfile: alpine + os: ubuntu-24.04-arm - from_image: alpine image_platform: linux/x86_64 gem_platform: x86_64-linux dockerfile: alpine + docker_arg: "--build-arg with_pkg=gcompat" + - from_image: alpine + image_platform: linux/arm64 + gem_platform: aarch64-linux + dockerfile: alpine + docker_arg: "--build-arg with_pkg=gcompat" + os: ubuntu-24.04-arm - runs-on: ubuntu-latest + runs-on: ${{ matrix.os || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 - name: Download gem-${{ matrix.gem_platform }} @@ -180,8 +198,7 @@ jobs: name: binary-gem-${{ matrix.gem_platform }} - name: Build image and Run tests run: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - docker build --rm --platform ${{matrix.image_platform}} --build-arg 
from_image=${{matrix.from_image}} -t ruby-test -f spec/env/Dockerfile.${{matrix.dockerfile}} . + docker build --rm --platform ${{matrix.image_platform}} --build-arg from_image=${{matrix.from_image}} ${{matrix.docker_arg}} -t ruby-test -f spec/env/Dockerfile.${{matrix.dockerfile}} . docker run --rm -t --network=host -v `pwd`:/build ruby-test job_binary_yugabyte: diff --git a/Rakefile b/Rakefile index 58dd0b0d0..a06ab13bf 100644 --- a/Rakefile +++ b/Rakefile @@ -50,7 +50,9 @@ CrossLibraries = [ ['x86-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], ['x86_64-linux', 'linux-x86_64', 'x86_64-linux-gnu'], + ['x86_64-linux-musl', 'linux-x86_64', 'x86_64-unknown-linux-musl'], ['aarch64-linux', 'linux-aarch64', 'aarch64-linux-gnu'], + ['aarch64-linux-musl', 'linux-aarch64', 'aarch64-linux-musl'], ['x86_64-darwin', 'darwin64-x86_64', 'x86_64-apple-darwin'], ['arm64-darwin', 'darwin64-arm64', 'arm64-apple-darwin'], ].map do |platform, openssl_config, toolchain| @@ -68,7 +70,13 @@ Rake::ExtensionTask.new do |ext| ext.lib_dir = 'lib' ext.source_pattern = "*.{c,h}" ext.cross_compile = true - ext.cross_platform = CrossLibraries.map(&:platform) + + # Activate current cross compiled platform only. + # This is to work around the issue that `linux` platform is selected in `linux-musl` image. 
+ ext.cross_platform = CrossLibraries.map(&:platform).select do |pl| + m = ENV["RCD_IMAGE"]&.match(/:(?[\d\.]+)-mri-(?[-\w]+)$/) + m && m[:platform] == pl + end ext.cross_config_options += CrossLibraries.map do |xlib| { diff --git a/spec/env/Dockerfile.alpine b/spec/env/Dockerfile.alpine index 576cf7528..ff83b0c93 100644 --- a/spec/env/Dockerfile.alpine +++ b/spec/env/Dockerfile.alpine @@ -1,8 +1,9 @@ ARG from_image FROM ${from_image} +ARG with_pkg RUN uname -a -RUN apk add ruby ruby-rake ruby-dev git gcc make musl-dev gcompat postgresql16 sudo +RUN apk add ruby ruby-rake ruby-dev git gcc make musl-dev postgresql16 sudo $with_pkg RUN git config --global --add safe.directory /build RUN ruby --version From f326991877accd1301ea3913f8370eee60b234e3 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 3 Aug 2025 21:26:00 +0200 Subject: [PATCH 098/118] Add release notes for pg-1.6.1 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 432844490..d98a39cc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +## v1.6.1 [2025-08-03] Lars Kanis + +- Add binary gems for platforms `aarch64-linux-musl` and `x86_64-linux-musl` without the need to install package `gcompat`. [#657](https://github.com/ged/ruby-pg/pull/657). +- Serialize CompositeCoder#dimensions only when set. [#652](https://github.com/ged/ruby-pg/pull/652) + This fixes the compatibility to pg-1.5.9, when deserializing Marshal data from pg-1.6, as long as the new attribute isn't used. 
+- Remove dependency to MSYS2 package "postgresql" from binary Windows gem [#654](https://github.com/ged/ruby-pg/pull/654) + + ## v1.6.0 [2025-07-27] Lars Kanis Added: From 5da9f3b98a0383070fe7d9c315988df8eebdf034 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 3 Aug 2025 21:26:59 +0200 Subject: [PATCH 099/118] Bump VERSION to 1.6.1 --- lib/pg/version.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/version.rb b/lib/pg/version.rb index 8919e65f4..579dbe2b8 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,4 +1,4 @@ module PG # Library version - VERSION = '1.6.0' + VERSION = '1.6.1' end From 3aca24984c42c1adbb3ce60d6ff2b88388f4f12c Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 4 Aug 2025 09:04:52 +0200 Subject: [PATCH 100/118] Update README regarding new `*-linux-musl` gem platform --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index 6394e588d..1eccdae5c 100644 --- a/README.md +++ b/README.md @@ -57,9 +57,7 @@ This installs the binary gem, specific to the running platform by default. The binary gems don't depend on the libpq package on the running system. They have libpq builtin. -The gems for platform `x86_64-linux` and `aarch64-linux` run on Alpine Linux, but require the package `gcompat` there as long as we don't provide a native gem for platform `x86_64-linux-musl`. Install this package like so: - - apk add gcompat +The gems for platform `x86_64-linux` and `aarch64-linux` run on Alpine Linux, but require the package `gcompat`, while the native gems for platform `*-linux-musl` work without that dependency. There is one use case the binary gems don't support: Retrieval of connection [options from LDAP](https://www.postgresql.org/docs/current/libpq-ldap.html). To support this `libldap` would be necessary, but it has a lot of dependencies. It doesn't seem to be a widely used feature and that it's worth to support it. 
If it's necessary, the source gem can be forced. From 66f1266e3903c0fbae7befd7283fc42c3d9b3445 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 4 Aug 2025 09:33:46 +0200 Subject: [PATCH 101/118] Some small documentation improvements --- ext/pg_result.c | 10 +++++----- ext/pg_tuple.c | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ext/pg_result.c b/ext/pg_result.c index 4ea56c452..bc0a38c24 100644 --- a/ext/pg_result.c +++ b/ext/pg_result.c @@ -617,7 +617,7 @@ pgresult_error_message(VALUE self) * call-seq: * res.verbose_error_message( verbosity, show_context ) -> String * - * Returns a reformatted version of the error message associated with a PGresult object. + * Returns a reformatted version of the error message associated with the PG::Result object. * */ static VALUE @@ -734,9 +734,9 @@ pgresult_nfields(VALUE self) * call-seq: * res.binary_tuples() -> Integer * - * Returns 1 if the PGresult contains binary data and 0 if it contains text data. + * Returns 1 if the PG::Result contains binary data and 0 if it contains text data. * - * This function is deprecated (except for its use in connection with COPY), because it is possible for a single PGresult to contain text data in some columns and binary data in others. + * This function is deprecated (except for its use in connection with COPY), because it is possible for a single PG::Result to contain text data in some columns and binary data in others. * Result#fformat is preferred. binary_tuples returns 1 only if all columns of the result are binary (format 1). */ static VALUE @@ -1572,8 +1572,8 @@ pgresult_stream_any(VALUE self, int (*yielder)(VALUE, int, int, void*), void* da * wrapping each row into a dedicated result object, it delivers data in nearly * the same speed as with ordinary results. * - * The base result must be in status PGRES_SINGLE_TUPLE or PGRES_TUPLES_CHUNK. - * It iterates over all tuples until the status changes to PGRES_TUPLES_OK. 
+ * The base result must be in status +PGRES_SINGLE_TUPLE+ or +PGRES_TUPLES_CHUNK+. + * It iterates over all tuples until the status changes to +PGRES_TUPLES_OK+. * A PG::Error is raised for any errors from the server. * * Row description data does not change while the iteration. All value retrieval diff --git a/ext/pg_tuple.c b/ext/pg_tuple.c index dae13445f..7b167c9e7 100644 --- a/ext/pg_tuple.c +++ b/ext/pg_tuple.c @@ -242,10 +242,10 @@ pg_tuple_materialize(VALUE self) * An integer +key+ is interpreted as column index. * Negative values of index count from the end of the array. * - * Depending on Result#field_name_type= a string or symbol +key+ is interpreted as column name. + * Depending on PG::Result#field_name_type= a string or symbol +key+ is interpreted as column name. * * If the key can't be found, there are several options: - * With no other arguments, it will raise a IndexError exception; + * With no other arguments, it will raise a +IndexError+ exception; * if default is given, then that will be returned; * if the optional code block is specified, then that will be run and its result returned. */ @@ -302,7 +302,7 @@ pg_tuple_fetch(int argc, VALUE *argv, VALUE self) * An integer +key+ is interpreted as column index. * Negative values of index count from the end of the array. * - * Depending on Result#field_name_type= a string or symbol +key+ is interpreted as column name. + * Depending on PG::Result#field_name_type= a string or symbol +key+ is interpreted as column name. * * If the key can't be found, it returns +nil+ . */ @@ -405,7 +405,7 @@ pg_tuple_each_value(VALUE self) * tup.values -> Array * * Returns the values of this tuple as Array. - * +res.tuple(i).values+ is equal to +res.tuple_values(i)+ . + * res.tuple(i).values is equal to res.tuple_values(i) . 
*/ static VALUE pg_tuple_values(VALUE self) From 6b174359fccc6a8180a7400a53d8b2916f4dd659 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 4 Aug 2025 10:24:20 +0200 Subject: [PATCH 102/118] More documentation improvements --- README.md | 6 +++--- ext/pg_result.c | 4 ++-- ext/pg_type_map.c | 4 +++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 1eccdae5c..f722208d4 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ Very basic type casting can be enabled by: But Pg's type casting is highly customizable. That's why it's divided into 2 layers: -### Encoders / Decoders (ext/pg_*coder.c, lib/pg/*coder.rb) +### Encoders / Decoders (ext/pg_\*coder.c, lib/pg/\*coder.rb) This is the lower layer, containing encoding classes that convert Ruby objects for transmission to the DBMS and decoding classes to convert @@ -188,9 +188,9 @@ The following text and binary formats can also be encoded although they are not * Literal for insertion into SQL string: [TE](rdoc-ref:PG::TextEncoder::QuotedLiteral) * SQL-Identifier: [TE](rdoc-ref:PG::TextEncoder::Identifier), [TD](rdoc-ref:PG::TextDecoder::Identifier) -### PG::TypeMap and derivations (ext/pg_type_map*.c, lib/pg/type_map*.rb) +### TypeMap and derivations (ext/pg_type_map\*.c, lib/pg/type_map\*.rb) -A TypeMap defines which value will be converted by which encoder/decoder. +A PG::TypeMap defines which value will be converted by which encoder/decoder. There are different type map strategies, implemented by several derivations of this class. They can be chosen and configured according to the particular needs for type casting. The default type map is PG::TypeMapAllStrings. diff --git a/ext/pg_result.c b/ext/pg_result.c index bc0a38c24..999c3dff2 100644 --- a/ext/pg_result.c +++ b/ext/pg_result.c @@ -1402,7 +1402,7 @@ pgresult_fields(VALUE self) * call-seq: * res.type_map = typemap * - * Set the TypeMap that is used for type casts of result values to ruby objects. 
+ * Set the PG::TypeMap that is used for type casts of result values to ruby objects. * * All value retrieval methods will respect the type map and will do the * type casts from PostgreSQL's wire format to Ruby objects on the fly, @@ -1432,7 +1432,7 @@ pgresult_type_map_set(VALUE self, VALUE typemap) * call-seq: * res.type_map -> value * - * Returns the TypeMap that is currently set for type casts of result values to ruby objects. + * Returns the PG::TypeMap that is currently set for type casts of result values to ruby objects. * */ static VALUE diff --git a/ext/pg_type_map.c b/ext/pg_type_map.c index f6db5c0aa..8f06cb12f 100644 --- a/ext/pg_type_map.c +++ b/ext/pg_type_map.c @@ -187,7 +187,9 @@ init_pg_type_map(void) * * This is the base class for type maps. * See derived classes for implementations of different type cast strategies - * ( PG::TypeMapByColumn, PG::TypeMapByOid ). + * ( PG::TypeMapByColumn, PG::TypeMapByOid, etc.). + * + * Find more type maps in the {README}[rdoc-ref:README.md@Type+Casts]. * */ rb_cTypeMap = rb_define_class_under( rb_mPG, "TypeMap", rb_cObject ); From 0745dc9b0d8d20f1184db1893fc61947bcbd39f1 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 4 Aug 2025 15:21:59 +0200 Subject: [PATCH 103/118] Fix some constants to be frozen and usable in a Ractor Freeze some constants to make them available in a Ractor context. 
--- lib/pg.rb | 1 + lib/pg/version.rb | 1 + spec/pg_spec.rb | 10 ++++++++++ 3 files changed, 12 insertions(+) diff --git a/lib/pg.rb b/lib/pg.rb index 73f2f894c..557f81728 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -21,6 +21,7 @@ module PG POSTGRESQL_LIB_PATH = false end end + POSTGRESQL_LIB_PATH.freeze add_dll_path = proc do |path, &block| if RUBY_PLATFORM =~/(mswin|mingw)/i && path diff --git a/lib/pg/version.rb b/lib/pg/version.rb index 579dbe2b8..d37ddd956 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,3 +1,4 @@ +# frozen_string_literal: true module PG # Library version VERSION = '1.6.1' diff --git a/spec/pg_spec.rb b/spec/pg_spec.rb index dc7b3e2b4..b24d3c1bf 100644 --- a/spec/pg_spec.rb +++ b/spec/pg_spec.rb @@ -57,4 +57,14 @@ expect( bres ).to eq( 55 ) end + + it "should provide constants in a Ractor", :ractor do + vals = Ractor.new(@conninfo) do |conninfo| + [PG.library_version, PG.version_string, PG.threadsafe?, PG::VERSION, PG::POSTGRESQL_LIB_PATH] + end.value + + expect( vals ).to eq( + [PG.library_version, PG.version_string, PG.threadsafe?, PG::VERSION, PG::POSTGRESQL_LIB_PATH] + ) + end end From dae9173a10a194c6bfd1a5eb6bcb98637cfbf4b5 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 7 Aug 2025 13:19:01 +0200 Subject: [PATCH 104/118] Add `gem uninst` to README [ci skip] Without `gem uninstall` the platform specific version is preferred over the source version. 
--- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f722208d4..6084a2010 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,7 @@ There is one use case the binary gems don't support: Retrieval of connection [op The source gem can be forced by: + gem uninstall pg --all gem install pg --platform ruby You may need to specify the path to the 'pg_config' program installed with From cd0f81d779ee22d4d421daccae23fdc7cebee4f8 Mon Sep 17 00:00:00 2001 From: pTr <47277287+ptrgits@users.noreply.github.com> Date: Fri, 8 Aug 2025 21:03:12 +0700 Subject: [PATCH 105/118] Update helpers.rb --- spec/helpers.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/helpers.rb b/spec/helpers.rb index b8252d0da..992f53265 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -474,7 +474,7 @@ def create_cert_from_csr(name, csr, ca_cert, ca_key, valid_years: 10, dns_names: csr_cert.sign ca_key, OpenSSL::Digest::SHA256.new - open "#{output_dir}/#{name}", 'w' do |io| + File.open "#{output_dir}/#{name}", 'w' do |io| io.puts csr_cert.to_text io.write csr_cert.to_pem end From a10e8ae596f11876cb2b4fb97ba81c0387020dba Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 28 Aug 2025 10:42:04 +0200 Subject: [PATCH 106/118] Fix indention --- ext/pg_tuple.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/pg_tuple.c b/ext/pg_tuple.c index 7b167c9e7..f240e1d60 100644 --- a/ext/pg_tuple.c +++ b/ext/pg_tuple.c @@ -474,7 +474,7 @@ pg_tuple_dump(VALUE self) values = rb_ary_new4(this->num_fields, &this->values[0]); a = rb_ary_new3(2, field_names, values); - rb_copy_generic_ivar(a, self); + rb_copy_generic_ivar(a, self); return a; } From f927f51708d6ec8d53c4e8e7076ee6f000a01368 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 7 Aug 2025 14:19:21 +0200 Subject: [PATCH 107/118] Raise a more descriptive error message for pg_ext LoadError If the `3.4/pg_ext.so` doesn't load, the specific error message is overwritten by the 
subsequent LoadError of `pg_ext.so`. This is bad since this can hide valuable information about the load error, like a missing dependency. One such missing dependency can be the required GLIBC version. This patch adds a more descriptive error message containing both attempts to load the C extension. And it adds an additional help text for the special case of a GLIBC load error. The old error: ``` cannot load such file -- pg_ext (LoadError) ``` The new error: ``` pg's C extension failed to load: (LoadError) /lib/x86_64-linux-gnu/libm.so.6: version `GLIBC_2.29' not found (required by /root/.rubies/ruby-3.2.9/lib/ruby/gems/3.2.0/gems/pg-1.6.1-x86_64-linux/lib/3.2/pg_ext.so) - /root/.rubies/ruby-3.2.9/lib/ruby/gems/3.2.0/gems/pg-1.6.1-x86_64-linux/lib/3.2/pg_ext.so cannot load such file -- pg_ext The GLIBC version of this system seems too old. Please use the source version of pg: gem uninstall pg --all gem install pg --platform ruby or in your Gemfile: gem "pg", force_ruby_platform: true See also: https://github.com/ged/ruby-pg/blob/master/README.md#source-gem ``` Changing from tabs to spaces for better message formatting. 
Relates to #661 --- .github/workflows/binary-gems.yml | 23 +++ lib/pg.rb | 292 ++++++++++++++++-------------- misc/glibc/Dockerfile | 20 ++ misc/glibc/docker-compose.yml | 9 + misc/glibc/glibc_spec.rb | 5 + 5 files changed, 213 insertions(+), 136 deletions(-) create mode 100644 misc/glibc/Dockerfile create mode 100644 misc/glibc/docker-compose.yml create mode 100644 misc/glibc/glibc_spec.rb diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 322cdfc53..86ebb3fca 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -223,3 +223,26 @@ jobs: cp -v pg-*.gem misc/yugabyte/ cd misc/yugabyte docker-compose up --abort-on-container-exit --exit-code-from pg + + job_binary_too_old_glibc: + name: GLIBC + needs: rcd_build + strategy: + fail-fast: false + matrix: + include: + - gem_platform: x86_64-linux + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Download gem-${{ matrix.gem_platform }} + uses: actions/download-artifact@v4 + with: + name: binary-gem-${{ matrix.gem_platform }} + - name: Build image and Run tests + run: | + sudo apt-get install -y docker-compose + cp -v pg-*.gem misc/glibc/ + cd misc/glibc + docker-compose up --abort-on-container-exit --exit-code-from pg diff --git a/lib/pg.rb b/lib/pg.rb index 557f81728..b996362da 100644 --- a/lib/pg.rb +++ b/lib/pg.rb @@ -5,141 +5,161 @@ # The top-level PG namespace. module PG - # Is this file part of a fat binary gem with bundled libpq? - # This path must be enabled by add_dll_directory on Windows. - gplat = Gem::Platform.local - bundled_libpq_path = Dir[File.expand_path("../ports/#{gplat.cpu}-#{gplat.os}*/lib", __dir__)].first - if bundled_libpq_path - POSTGRESQL_LIB_PATH = bundled_libpq_path - else - # Try to load libpq path as found by extconf.rb - begin - require "pg/postgresql_lib_path" - rescue LoadError - # rake-compiler doesn't use regular "make install", but uses it's own install tasks. 
- # It therefore doesn't copy pg/postgresql_lib_path.rb in case of "rake compile". - POSTGRESQL_LIB_PATH = false - end - end - POSTGRESQL_LIB_PATH.freeze - - add_dll_path = proc do |path, &block| - if RUBY_PLATFORM =~/(mswin|mingw)/i && path - BUNDLED_LIBPQ_WITH_UNIXSOCKET = false - begin - require 'ruby_installer/runtime' - RubyInstaller::Runtime.add_dll_directory(path, &block) - rescue LoadError - old_path = ENV['PATH'] - ENV['PATH'] = "#{path};#{old_path}" - block.call - ENV['PATH'] = old_path - end - else - # libpq is found by a relative rpath in the cross compiled extension dll - # or by the system library loader - block.call - BUNDLED_LIBPQ_WITH_UNIXSOCKET = RUBY_PLATFORM=~/linux/i && PG::IS_BINARY_GEM - end - end - - # Add a load path to the one retrieved from pg_config - add_dll_path.call(POSTGRESQL_LIB_PATH) do - begin - # Try the . subdirectory for fat binary gems - major_minor = RUBY_VERSION[ /^(\d+\.\d+)/ ] or - raise "Oops, can't extract the major/minor version from #{RUBY_VERSION.dump}" - require "#{major_minor}/pg_ext" - rescue LoadError - require 'pg_ext' - end - end - - # Get the PG library version. - # - # +include_buildnum+ is no longer used and any value passed will be ignored. - def self.version_string( include_buildnum=nil ) - "%s %s" % [ self.name, VERSION ] - end - - - ### Convenience alias for PG::Connection.new. 
- def self.connect( *args, &block ) - Connection.new( *args, &block ) - end - - if defined?(Ractor.make_shareable) - def self.make_shareable(obj) - Ractor.make_shareable(obj) - end - else - def self.make_shareable(obj) - obj.freeze - end - end - - module BinaryDecoder - %i[ TimestampUtc TimestampUtcToLocal TimestampLocal ].each do |klass| - autoload klass, 'pg/binary_decoder/timestamp' - end - autoload :Date, 'pg/binary_decoder/date' - end - module BinaryEncoder - %i[ TimestampUtc TimestampLocal ].each do |klass| - autoload klass, 'pg/binary_encoder/timestamp' - end - end - module TextDecoder - %i[ TimestampUtc TimestampUtcToLocal TimestampLocal TimestampWithoutTimeZone TimestampWithTimeZone ].each do |klass| - autoload klass, 'pg/text_decoder/timestamp' - end - autoload :Date, 'pg/text_decoder/date' - autoload :Inet, 'pg/text_decoder/inet' - autoload :JSON, 'pg/text_decoder/json' - autoload :Numeric, 'pg/text_decoder/numeric' - end - module TextEncoder - %i[ TimestampUtc TimestampWithoutTimeZone TimestampWithTimeZone ].each do |klass| - autoload klass, 'pg/text_encoder/timestamp' - end - autoload :Date, 'pg/text_encoder/date' - autoload :Inet, 'pg/text_encoder/inet' - autoload :JSON, 'pg/text_encoder/json' - autoload :Numeric, 'pg/text_encoder/numeric' - end - - autoload :BasicTypeMapBasedOnResult, 'pg/basic_type_map_based_on_result' - autoload :BasicTypeMapForQueries, 'pg/basic_type_map_for_queries' - autoload :BasicTypeMapForResults, 'pg/basic_type_map_for_results' - autoload :BasicTypeRegistry, 'pg/basic_type_registry' - require 'pg/exceptions' - require 'pg/coder' - require 'pg/type_map_by_column' - require 'pg/connection' - require 'pg/cancel_connection' - require 'pg/result' - require 'pg/tuple' - autoload :VERSION, 'pg/version' - - - # Avoid "uninitialized constant Truffle::WarningOperations" on Truffleruby up to 22.3.1 - if RUBY_ENGINE=="truffleruby" && !defined?(Truffle::WarningOperations) - module TruffleFixWarn - def warn(str, category=nil) - super(str) 
- end - end - Warning.extend(TruffleFixWarn) - end - - # Ruby-3.4+ prints a warning, if bigdecimal is required but not in the Gemfile. - # But it's a false positive, since we enable bigdecimal depending features only if it's available. - # And most people don't need these features. - def self.require_bigdecimal_without_warning - oldverb, $VERBOSE = $VERBOSE, nil - require "bigdecimal" - ensure - $VERBOSE = oldverb - end + # Is this file part of a fat binary gem with bundled libpq? + # This path must be enabled by add_dll_directory on Windows. + gplat = Gem::Platform.local + bundled_libpq_path = Dir[File.expand_path("../ports/#{gplat.cpu}-#{gplat.os}*/lib", __dir__)].first + if bundled_libpq_path + POSTGRESQL_LIB_PATH = bundled_libpq_path + else + # Try to load libpq path as found by extconf.rb + begin + require "pg/postgresql_lib_path" + rescue LoadError + # rake-compiler doesn't use regular "make install", but uses it's own install tasks. + # It therefore doesn't copy pg/postgresql_lib_path.rb in case of "rake compile". + POSTGRESQL_LIB_PATH = false + end + end + POSTGRESQL_LIB_PATH.freeze + + add_dll_path = proc do |path, &block| + if RUBY_PLATFORM =~/(mswin|mingw)/i && path + BUNDLED_LIBPQ_WITH_UNIXSOCKET = false + begin + require 'ruby_installer/runtime' + RubyInstaller::Runtime.add_dll_directory(path, &block) + rescue LoadError + old_path = ENV['PATH'] + ENV['PATH'] = "#{path};#{old_path}" + block.call + ENV['PATH'] = old_path + end + else + # libpq is found by a relative rpath in the cross compiled extension dll + # or by the system library loader + block.call + BUNDLED_LIBPQ_WITH_UNIXSOCKET = RUBY_PLATFORM=~/linux/i && PG::IS_BINARY_GEM + end + end + + # Add a load path to the one retrieved from pg_config + add_dll_path.call(POSTGRESQL_LIB_PATH) do + begin + # Try the . 
subdirectory for fat binary gems + major_minor = RUBY_VERSION[ /^(\d+\.\d+)/ ] or + raise "Oops, can't extract the major/minor version from #{RUBY_VERSION.dump}" + require "#{major_minor}/pg_ext" + rescue LoadError => error1 + begin + require 'pg_ext' + rescue LoadError => error2 + msg = <<~EOT + pg's C extension failed to load: + #{error1} + #{error2} + EOT + if msg =~ /GLIBC/ + msg += <<~EOT + + The GLIBC version of this system seems too old. Please use the source version of pg: + gem uninstall pg --all + gem install pg --platform ruby + or in your Gemfile: + gem "pg", force_ruby_platform: true + See also: https://deveiate.org/code/pg/README_md.html#label-Source+gem + EOT + end + raise error2, msg + end + end + end + + # Get the PG library version. + # + # +include_buildnum+ is no longer used and any value passed will be ignored. + def self.version_string( include_buildnum=nil ) + "%s %s" % [ self.name, VERSION ] + end + + + ### Convenience alias for PG::Connection.new. + def self.connect( *args, &block ) + Connection.new( *args, &block ) + end + + if defined?(Ractor.make_shareable) + def self.make_shareable(obj) + Ractor.make_shareable(obj) + end + else + def self.make_shareable(obj) + obj.freeze + end + end + + module BinaryDecoder + %i[ TimestampUtc TimestampUtcToLocal TimestampLocal ].each do |klass| + autoload klass, 'pg/binary_decoder/timestamp' + end + autoload :Date, 'pg/binary_decoder/date' + end + module BinaryEncoder + %i[ TimestampUtc TimestampLocal ].each do |klass| + autoload klass, 'pg/binary_encoder/timestamp' + end + end + module TextDecoder + %i[ TimestampUtc TimestampUtcToLocal TimestampLocal TimestampWithoutTimeZone TimestampWithTimeZone ].each do |klass| + autoload klass, 'pg/text_decoder/timestamp' + end + autoload :Date, 'pg/text_decoder/date' + autoload :Inet, 'pg/text_decoder/inet' + autoload :JSON, 'pg/text_decoder/json' + autoload :Numeric, 'pg/text_decoder/numeric' + end + module TextEncoder + %i[ TimestampUtc TimestampWithoutTimeZone 
TimestampWithTimeZone ].each do |klass| + autoload klass, 'pg/text_encoder/timestamp' + end + autoload :Date, 'pg/text_encoder/date' + autoload :Inet, 'pg/text_encoder/inet' + autoload :JSON, 'pg/text_encoder/json' + autoload :Numeric, 'pg/text_encoder/numeric' + end + + autoload :BasicTypeMapBasedOnResult, 'pg/basic_type_map_based_on_result' + autoload :BasicTypeMapForQueries, 'pg/basic_type_map_for_queries' + autoload :BasicTypeMapForResults, 'pg/basic_type_map_for_results' + autoload :BasicTypeRegistry, 'pg/basic_type_registry' + require 'pg/exceptions' + require 'pg/coder' + require 'pg/type_map_by_column' + require 'pg/connection' + require 'pg/cancel_connection' + require 'pg/result' + require 'pg/tuple' + autoload :VERSION, 'pg/version' + + + # Avoid "uninitialized constant Truffle::WarningOperations" on Truffleruby up to 22.3.1 + if RUBY_ENGINE=="truffleruby" && !defined?(Truffle::WarningOperations) + module TruffleFixWarn + def warn(str, category=nil) + super(str) + end + end + Warning.extend(TruffleFixWarn) + end + + # Ruby-3.4+ prints a warning, if bigdecimal is required but not in the Gemfile. + # But it's a false positive, since we enable bigdecimal depending features only if it's available. + # And most people don't need these features. 
+ def self.require_bigdecimal_without_warning + oldverb, $VERBOSE = $VERBOSE, nil + require "bigdecimal" + ensure + $VERBOSE = oldverb + end end # module PG diff --git a/misc/glibc/Dockerfile b/misc/glibc/Dockerfile new file mode 100644 index 000000000..6bb9ff120 --- /dev/null +++ b/misc/glibc/Dockerfile @@ -0,0 +1,20 @@ +FROM debian:10.13 + +WORKDIR /pg + +# Debian 10.13 is EOL now: +RUN sed -i s/deb.debian.org/archive.debian.org/g /etc/apt/sources.list + +RUN apt-get update && apt-get install ruby git wget gcc make libz-dev libffi-dev libreadline-dev libyaml-dev libssl-dev -y + +ENV RBENV_ROOT=/usr/local/rbenv + +RUN git clone https://github.com/rbenv/rbenv.git ${RBENV_ROOT} && \ + git clone https://github.com/rbenv/ruby-build.git ${RBENV_ROOT}/plugins/ruby-build && \ + $RBENV_ROOT/bin/rbenv init + +RUN $RBENV_ROOT/bin/rbenv install 3.3.9 -- --disable-install-doc +RUN /usr/local/rbenv/versions/3.3.9/bin/gem inst rspec + +CMD /usr/local/rbenv/versions/3.3.9/bin/gem inst --local pg-*.gem && \ + /usr/local/rbenv/versions/3.3.9/bin/rspec glibc_spec.rb diff --git a/misc/glibc/docker-compose.yml b/misc/glibc/docker-compose.yml new file mode 100644 index 000000000..533cccaa0 --- /dev/null +++ b/misc/glibc/docker-compose.yml @@ -0,0 +1,9 @@ +services: + pg: + build: + context: . 
+ args: + - http_proxy + - https_proxy + volumes: + - .:/pg diff --git a/misc/glibc/glibc_spec.rb b/misc/glibc/glibc_spec.rb new file mode 100644 index 000000000..9b6b28327 --- /dev/null +++ b/misc/glibc/glibc_spec.rb @@ -0,0 +1,5 @@ +RSpec.describe "require 'pg'" do + it "gives a descriptive error message when GLIBC is too old" do + expect { require "pg" }.to raise_error(/GLIBC.*gem install pg --platform ruby/m) + end +end From 124e4fccfb701059c5cccb592e33e544574cd049 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 28 Aug 2025 11:17:37 +0200 Subject: [PATCH 108/118] Fix invalid use of instance variable in Ractor context --- spec/pg/connection_spec.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spec/pg/connection_spec.rb b/spec/pg/connection_spec.rb index 0d76cd368..2e7075e02 100644 --- a/spec/pg/connection_spec.rb +++ b/spec/pg/connection_spec.rb @@ -48,8 +48,8 @@ end it "connects using 7 arguments in a Ractor", :ractor do - vals = Ractor.new do - PG.connect( 'localhost', @port, nil, nil, :test, nil, nil ) do |conn| + vals = Ractor.new(@port) do |port| + PG.connect( 'localhost', port, nil, nil, :test, nil, nil ) do |conn| conn.exec("SELECT 234").values end end.value From c6e49b511e5ef8a2994e447848a0a9c102ff4246 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Thu, 28 Aug 2025 11:31:54 +0200 Subject: [PATCH 109/118] Use "rbpg_" prefix for base64_* functions To avoid name clashes with functions provided by other libraries like Heimdal. 
Otherwise this fails with a segfault on Macos due to this function: /System/Library/PrivateFrameworks/Heimdal.framework/Versions/A/Heimdal(base64_encode+0x94) --- ext/pg_binary_decoder.c | 2 +- ext/pg_binary_encoder.c | 4 ++-- ext/pg_text_decoder.c | 2 +- ext/pg_text_encoder.c | 4 ++-- ext/pg_util.c | 4 ++-- ext/pg_util.h | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ext/pg_binary_decoder.c b/ext/pg_binary_decoder.c index 5006a3c72..6b6c06e30 100644 --- a/ext/pg_binary_decoder.c +++ b/ext/pg_binary_decoder.c @@ -117,7 +117,7 @@ pg_bin_dec_to_base64(t_pg_coder *conv, const char *val, int len, int tuple, int /* create a buffer of the encoded length */ VALUE out_value = rb_str_new(NULL, encoded_len); - base64_encode( RSTRING_PTR(out_value), val, len ); + rbpg_base64_encode( RSTRING_PTR(out_value), val, len ); /* Is it a pure String conversion? Then we can directly send out_value to the user. */ if( this->comp.format == 0 && dec_func == pg_text_dec_string ){ diff --git a/ext/pg_binary_encoder.c b/ext/pg_binary_encoder.c index f887d1d04..9417fe8b3 100644 --- a/ext/pg_binary_encoder.c +++ b/ext/pg_binary_encoder.c @@ -524,7 +524,7 @@ pg_bin_enc_from_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermed if(out){ /* Second encoder pass, if required */ strlen = enc_func(this->elem, value, out, intermediate, enc_idx); - strlen = base64_decode( out, out, strlen ); + strlen = rbpg_base64_decode( out, out, strlen ); return strlen; } else { @@ -538,7 +538,7 @@ pg_bin_enc_from_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermed strlen = RSTRING_LENINT(subint); out_str = rb_str_new(NULL, BASE64_DECODED_SIZE(strlen)); - strlen = base64_decode( RSTRING_PTR(out_str), RSTRING_PTR(subint), strlen); + strlen = rbpg_base64_decode( RSTRING_PTR(out_str), RSTRING_PTR(subint), strlen); rb_str_set_len( out_str, strlen ); *intermediate = out_str; diff --git a/ext/pg_text_decoder.c b/ext/pg_text_decoder.c index 0f311bc29..2e1eb1059 100644 --- 
a/ext/pg_text_decoder.c +++ b/ext/pg_text_decoder.c @@ -579,7 +579,7 @@ pg_text_dec_from_base64(t_pg_coder *conv, const char *val, int len, int tuple, i /* create a buffer of the expected decoded length */ VALUE out_value = rb_str_new(NULL, BASE64_DECODED_SIZE(len)); - decoded_len = base64_decode( RSTRING_PTR(out_value), val, len ); + decoded_len = rbpg_base64_decode( RSTRING_PTR(out_value), val, len ); rb_str_set_len(out_value, decoded_len); /* Is it a pure String conversion? Then we can directly send out_value to the user. */ diff --git a/ext/pg_text_encoder.c b/ext/pg_text_encoder.c index d34bba627..e1c72972b 100644 --- a/ext/pg_text_encoder.c +++ b/ext/pg_text_encoder.c @@ -784,7 +784,7 @@ pg_text_enc_to_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermedi if(out){ /* Second encoder pass, if required */ strlen = enc_func(this->elem, value, out, intermediate, enc_idx); - base64_encode( out, out, strlen ); + rbpg_base64_encode( out, out, strlen ); return BASE64_ENCODED_SIZE(strlen); } else { @@ -799,7 +799,7 @@ pg_text_enc_to_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermedi out_str = rb_str_new(NULL, BASE64_ENCODED_SIZE(strlen)); PG_ENCODING_SET_NOCHECK(out_str, enc_idx); - base64_encode( RSTRING_PTR(out_str), RSTRING_PTR(subint), strlen); + rbpg_base64_encode( RSTRING_PTR(out_str), RSTRING_PTR(subint), strlen); *intermediate = out_str; return -1; diff --git a/ext/pg_util.c b/ext/pg_util.c index 4cec50a2f..572aa8db8 100644 --- a/ext/pg_util.c +++ b/ext/pg_util.c @@ -15,7 +15,7 @@ static const char base64_encode_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijk * in-place (with _out_ == _in_). 
*/ void -base64_encode( char *out, const char *in, int len) +rbpg_base64_encode( char *out, const char *in, int len) { const unsigned char *in_ptr = (const unsigned char *)in + len; char *out_ptr = out + BASE64_ENCODED_SIZE(len); @@ -72,7 +72,7 @@ static const unsigned char base64_decode_table[] = * It is possible to decode a string in-place (with _out_ == _in_). */ int -base64_decode( char *out, const char *in, unsigned int len) +rbpg_base64_decode( char *out, const char *in, unsigned int len) { unsigned char a, b, c, d; const unsigned char *in_ptr = (const unsigned char *)in; diff --git a/ext/pg_util.h b/ext/pg_util.h index 282b3d5c2..226b56345 100644 --- a/ext/pg_util.h +++ b/ext/pg_util.h @@ -57,8 +57,8 @@ #define BASE64_ENCODED_SIZE(strlen) (((strlen) + 2) / 3 * 4) #define BASE64_DECODED_SIZE(base64len) (((base64len) + 3) / 4 * 3) -void base64_encode( char *out, const char *in, int len); -int base64_decode( char *out, const char *in, unsigned int len); +void rbpg_base64_encode( char *out, const char *in, int len); +int rbpg_base64_decode( char *out, const char *in, unsigned int len); int rbpg_strncasecmp(const char *s1, const char *s2, size_t n); From 5c7e9bc6fe7f2a941cc6fe74e60b32b4d2c7bee4 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Fri, 29 Aug 2025 12:33:58 +0200 Subject: [PATCH 110/118] Remove absolute paths from binaries which point to build directories There are several absolute paths built into the binary. For instance fallback path to `pg_service.conf` or the fallback path to kerberos ccache file or the rpath included into the libpq or C-ext files. They can be avoided or fixed by disabling rpaths and by using `DESTDIR` instead of `--prefix` configure option. Fixes #666 The remaining rpath to libruby will probably be fixed by: https://github.com/rake-compiler/rake-compiler-dock/pull/165 Also fix the `MAKEFLAGS` option. `MAKEOPTS` was wrong. 
--- Rakefile | 2 +- ext/extconf.rb | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/Rakefile b/Rakefile index a06ab13bf..28e8f67f1 100644 --- a/Rakefile +++ b/Rakefile @@ -141,7 +141,7 @@ CrossLibraries.each do |xlib| bundle install --local && #{ "rake install_darwin_mig[__arm64__]" if platform =~ /arm64-darwin/ } #{ "rake install_darwin_mig[__x86_64__]" if platform =~ /x86_64-darwin/ } - rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEOPTS=-j`nproc` RUBY_CC_VERSION=#{RakeCompilerDock.ruby_cc_version("~>2.7", "~>3.0")} + rake native:#{platform} pkg/#{$gem_spec.full_name}-#{platform}.gem MAKEFLAGS="-j`nproc` V=1" RUBY_CC_VERSION=#{RakeCompilerDock.ruby_cc_version("~>2.7", "~>3.0")} EOT end desc "Build the native binary gems" diff --git a/ext/extconf.rb b/ext/extconf.rb index f307593e0..ecfc885a0 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -49,6 +49,12 @@ def port_path "#{target}/#{RUBY_PLATFORM}" end + # Add "--prefix=/", to avoid our actual build install path compiled into the binary. + # Instead use DESTDIR variable of make to set our install path. 
+ def configure_prefix + "--prefix=" + end + def cook_and_activate checkpoint = File.join(self.target, "#{self.name}-#{self.version}-#{RUBY_PLATFORM}.installed") unless File.exist?(checkpoint) @@ -70,13 +76,13 @@ def configure envs = [] envs << "CFLAGS=-DDSO_WIN32 -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /mingw|mswin/ envs << "CFLAGS=-fPIC -DOPENSSL_THREADS" if RUBY_PLATFORM =~ /linux|darwin/ - execute('configure', ['env', *envs, "./Configure", openssl_platform, "threads", "-static", "CROSS_COMPILE=#{host}-", configure_prefix], altlog: "config.log") + execute('configure', ['env', *envs, "./Configure", openssl_platform, "threads", "-static", "CROSS_COMPILE=#{host}-", "--prefix=/"], altlog: "config.log") end def compile execute('compile', "#{make_cmd} build_libs") end def install - execute('install', "#{make_cmd} install_dev") + execute('install', "#{make_cmd} install_dev DESTDIR=#{path}") end end @@ -104,6 +110,9 @@ def configure end super end + def install + execute('install', "#{make_cmd} install DESTDIR=#{path}") + end end # We specify -fcommon to get around duplicate definition errors in recent gcc. 
# See https://github.com/cockroachdb/cockroach/issues/49734 @@ -112,6 +121,7 @@ def configure recipe.configure_options << "--without-keyutils" recipe.configure_options << "--disable-nls" recipe.configure_options << "--disable-silent-rules" + recipe.configure_options << "--disable-rpath" recipe.configure_options << "--without-system-verto" recipe.configure_options << "krb5_cv_attr_constructor_destructor=yes" recipe.configure_options << "ac_cv_func_regcomp=yes" @@ -146,12 +156,13 @@ def configure_defaults '--without-zlib', '--without-icu', '--without-readline', + '--disable-rpath', 'ac_cv_search_gss_store_cred_into=', ] end def compile - execute 'compile include', "#{make_cmd} -C src/include install" - execute 'compile interfaces', "#{make_cmd} -C src/interfaces install" + execute 'compile include', "#{make_cmd} -C src/include install DESTDIR=#{path}" + execute 'compile interfaces', "#{make_cmd} -C src/interfaces install DESTDIR=#{path}" end def install end @@ -169,6 +180,9 @@ def install # Use our own library name for libpq to avoid loading of system libpq by accident. FileUtils.ln_sf File.join(postgresql_recipe.port_path, "lib/#{libpq_orig}"), File.join(postgresql_recipe.port_path, "lib/#{libpq_rubypg}") + # Link to libpq_rubypg in our ports directory without adding it as rpath (like dir_config does) + $CFLAGS << " -I#{postgresql_recipe.path}/include" + $LDFLAGS << " -L#{postgresql_recipe.path}/lib" # Avoid dependency to external libgcc.dll on x86-mingw32 $LDFLAGS << " -static-libgcc" if RUBY_PLATFORM =~ /mingw|mswin/ # Avoid: "libpq.so: undefined reference to `dlopen'" in cross-ruby-2.7.8 @@ -176,8 +190,6 @@ def install # Find libpq in the ports directory coming from lib/3.x # It is shared between all compiled ruby versions. 
$LDFLAGS << " '-Wl,-rpath=$$ORIGIN/../../ports/#{gem_platform}/lib'" if RUBY_PLATFORM =~ /linux/ - # Don't use pg_config for cross build, but --with-pg-* path options - dir_config('pg', "#{postgresql_recipe.path}/include", "#{postgresql_recipe.path}/lib") $defs.push( "-DPG_IS_BINARY_GEM") else From 35b2704fc79aca22488251b1910d3b8e6be6f83e Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 30 Aug 2025 15:34:03 +0200 Subject: [PATCH 111/118] Add tests for rpath or other build related paths --- spec/pg_spec.rb | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/spec/pg_spec.rb b/spec/pg_spec.rb index b24d3c1bf..c83bff57f 100644 --- a/spec/pg_spec.rb +++ b/spec/pg_spec.rb @@ -67,4 +67,30 @@ [PG.library_version, PG.version_string, PG.threadsafe?, PG::VERSION, PG::POSTGRESQL_LIB_PATH] ) end + + it "native gem's C-ext file shouldn't contain any rpath or other build-related paths" do + skip "applies to native binary gems only" unless PG::IS_BINARY_GEM + cext_fname = $LOADED_FEATURES.grep(/pg_ext/).first + expect(cext_fname).not_to be_nil + cext_text = File.binread(cext_fname) + expect(cext_text).to match(/Init_pg_ext/) # C-ext should contain the init function + expect(cext_text).not_to match(/usr\/local/) # there should be no rpath to /usr/local/rake-compiler/ruby/x86_64-unknown-linux-musl/ruby-3.4.5/lib or so + expect(cext_text).not_to match(/home\//) # there should be no path to /home/ or so + end + + it "native gem's libpq file shouldn't contain any rpath or other build-related paths" do + skip "applies to native binary gems only" unless PG::IS_BINARY_GEM + + libpq_fname = case RUBY_PLATFORM + when /mingw|mswin/ then "libpq.dll" + when /linux/ then "libpq-ruby-pg.so.1" + when /darwin/ then "libpq-ruby-pg.1.dylib" + end + + path = File.join(PG::POSTGRESQL_LIB_PATH, libpq_fname) + text = File.binread(path) + expect(text).to match(/PQconnectdb/) # libpq should contain the connect function + expect(text).not_to match(/usr\/local/) # there should be
no rpath to build dirs + expect(text).not_to match(/home\//) # there should be no path to /home/.../ports/ or so + end end From 70551b94eab64bcfd879d91382b216224d2394b5 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 30 Aug 2025 18:08:31 +0200 Subject: [PATCH 112/118] Add spec/pg_spec.rb to tests .. which wasn't tested in CI. We could use `rake test` instead, but it starts a separate `rspec` process, which doesn't respect our early load `-rpg`. Instead it uses the git checkout for lib files and not the files installed from the native binary gem. This in turn doesn't set POSTGRESQL_LIB_PATH correctly. --- .github/workflows/binary-gems.yml | 4 ++-- .github/workflows/source-gem.yml | 2 +- spec/env/Dockerfile.alpine | 2 +- spec/env/Dockerfile.centos | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 86ebb3fca..363a74e64 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -142,12 +142,12 @@ jobs: - run: echo $env:PATH - name: Run specs if: ${{ matrix.os != 'windows-latest' }} - run: ruby -rpg -S rspec -fd spec/**/*_spec.rb + run: ruby -rpg -S rspec -fd spec/*_spec.rb spec/**/*_spec.rb - name: Run specs if: ${{ matrix.os == 'windows-latest' }} run: | ridk enable - ruby -rpg -S rspec -fd spec/**/*_spec.rb + ruby -rpg -S rspec -fd spec/*_spec.rb spec/**/*_spec.rb - name: Print logs if job failed if: ${{ failure() && matrix.os == 'windows-latest' }} diff --git a/.github/workflows/source-gem.yml b/.github/workflows/source-gem.yml index 7b56eee9d..961902a07 100644 --- a/.github/workflows/source-gem.yml +++ b/.github/workflows/source-gem.yml @@ -147,7 +147,7 @@ jobs: PG_DEBUG: 0 # Temprary fix only for Truffleruby-24.0.0: TRUFFLERUBYOPT: --experimental-options --keep-handles-alive - run: ruby -rpg -S rspec spec/**/*_spec.rb -cfdoc + run: ruby -rpg -S rspec spec/*_spec.rb spec/**/*_spec.rb -cfdoc - name: Print db logs if job failed if: ${{ 
failure() && matrix.os == 'windows' }} diff --git a/spec/env/Dockerfile.alpine b/spec/env/Dockerfile.alpine index ff83b0c93..340fee53b 100644 --- a/spec/env/Dockerfile.alpine +++ b/spec/env/Dockerfile.alpine @@ -25,4 +25,4 @@ CMD ruby -v && \ bundle config set --local without 'development' && \ bundle install && \ sudo -u postgres ruby -rpg -e "p RUBY_DESCRIPTION, PG::VERSION, PG::POSTGRESQL_LIB_PATH, PG::IS_BINARY_GEM, PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET; puts PG.connect.exec('SELECT version()').values; p PG.connect.host" && \ - sudo -u postgres ruby -rpg -S rspec -fd spec/**/*_spec.rb + sudo -u postgres ruby -rpg -S rspec -fd spec/*_spec.rb spec/**/*_spec.rb diff --git a/spec/env/Dockerfile.centos b/spec/env/Dockerfile.centos index bd09a2abb..06afc1fef 100644 --- a/spec/env/Dockerfile.centos +++ b/spec/env/Dockerfile.centos @@ -24,4 +24,4 @@ CMD ruby -v && \ bundle config set --local without 'development' && \ bundle install && \ sudo -u postgres ruby -rpg -e "p RUBY_DESCRIPTION, PG::VERSION, PG::POSTGRESQL_LIB_PATH, PG::IS_BINARY_GEM, PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET; puts PG.connect.exec('SELECT version()').values; p PG.connect.host" && \ - sudo -u postgres ruby -rpg -S rspec -fd spec/**/*_spec.rb + sudo -u postgres ruby -rpg -S rspec -fd spec/*_spec.rb spec/**/*_spec.rb From 5e2ac6e14589f8698f127be080e94e93151552fc Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sat, 30 Aug 2025 22:41:44 +0200 Subject: [PATCH 113/118] Temporary enable head version of rake-compiler-dock ... to fix C-ext rpath test case. 
--- .github/workflows/binary-gems.yml | 2 ++ Gemfile | 2 +- Rakefile | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/binary-gems.yml b/.github/workflows/binary-gems.yml index 363a74e64..e70c02686 100644 --- a/.github/workflows/binary-gems.yml +++ b/.github/workflows/binary-gems.yml @@ -20,6 +20,8 @@ jobs: rcd_build: name: Build runs-on: ubuntu-latest + env: + RCD_IMAGE_VERSION: snapshot strategy: fail-fast: false matrix: diff --git a/Gemfile b/Gemfile index d0237dd8b..ae91a95d1 100644 --- a/Gemfile +++ b/Gemfile @@ -13,7 +13,7 @@ end group :test do gem "bundler", ">= 1.16", "< 3.0" gem "rake-compiler", "~> 1.0" - gem "rake-compiler-dock", "~> 1.9.1" + gem "rake-compiler-dock", "~> 1.9.1", git: "https://github.com/rake-compiler/rake-compiler-dock" gem "rspec", "~> 3.5" # "bigdecimal" is a gem on ruby-3.4+ and it's optional for ruby-pg. # Specs should succeed without it, but 4 examples are then excluded. diff --git a/Rakefile b/Rakefile index 28e8f67f1..3b9955c54 100644 --- a/Rakefile +++ b/Rakefile @@ -74,7 +74,7 @@ Rake::ExtensionTask.new do |ext| # Activate current cross compiled platform only. # This is to work around the issue that `linux` platform is selected in `linux-musl` image. 
ext.cross_platform = CrossLibraries.map(&:platform).select do |pl| - m = ENV["RCD_IMAGE"]&.match(/:(?[\d\.]+)-mri-(?[-\w]+)$/) + m = ENV["RCD_IMAGE"]&.match(/:(?[\w\.]+)-mri-(?[-\w]+)$/) m && m[:platform] == pl end @@ -106,7 +106,7 @@ task 'gem:native:prepare' do # Copy gem signing key and certs to be accessible from the docker container mkdir_p 'build/gem' sh "cp ~/.gem/gem-*.pem build/gem/ || true" - sh "bundle package" + sh "bundle package --all" begin OpenSSL::PKey.read(File.read(File.expand_path("~/.gem/gem-private_key.pem")), ENV["GEM_PRIVATE_KEY_PASSPHRASE"] || "") rescue OpenSSL::PKey::PKeyError From 19a35473b0ef7b4bf0d9caf7b62fb7fbbf7e168b Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Sun, 31 Aug 2025 18:50:39 +0200 Subject: [PATCH 114/118] Strip darwin C-ext to remove paths to C files ... which are only valid in the build environment. --- ext/extconf.rb | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/ext/extconf.rb b/ext/extconf.rb index ecfc885a0..c55133dcd 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -330,3 +330,20 @@ module PG create_header() create_makefile( "pg_ext" ) +if gem_platform + # exercise the strip command on native binary gems + # This approach borrowed from + # https://github.com/rake-compiler/rake-compiler-dock/blob/38066d479050f4fdb3956469255b35a05e5949ef/test/rcd_test/ext/mri/extconf.rb#L97C1-L110C42 + strip_tool = RbConfig::CONFIG['STRIP'] + strip_tool += ' -x' if RUBY_PLATFORM =~ /darwin/ + File.open('Makefile.new', 'w') do |o| + o.puts 'hijack: all strip' + o.puts + o.write(File.read('Makefile')) + o.puts + o.puts 'strip: $(DLLIB)' + o.puts "\t$(ECHO) Stripping $(DLLIB)" + o.puts "\t$(Q) #{strip_tool} $(DLLIB)" + end + File.rename('Makefile.new', 'Makefile') +end From 428415dd4ecb55882d2319d8b166fe73343853ba Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 1 Sep 2025 10:22:16 +0200 Subject: [PATCH 115/118] Prepare CHANGELOG for 1.6.2 [skip ci] --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 
insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d98a39cc1..a5d1cf0e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## v1.6.2 [YYYY-MM-DD] Lars Kanis + +- Remove several absolute paths from native binaries which pointed to build directories. [#668](https://github.com/ged/ruby-pg/pull/668) +- Fix bad fallback path to `pg_service.conf`. [#666](https://github.com/ged/ruby-pg/pull/666) +- Use `rbpg_` prefix for `base64_*` functions to avoid name clashes with functions provided by other libraries like Heimdal on Macos. [#667](https://github.com/ged/ruby-pg/pull/667) +- Raise a more descriptive error message in case of `pg_ext` LoadError. [#664](https://github.com/ged/ruby-pg/pull/664) +- Freeze some constants to make them available in a Ractor context. [#660](https://github.com/ged/ruby-pg/pull/660) +- Several documentation improvements. + + ## v1.6.1 [2025-08-03] Lars Kanis - Add binary gems for platforms `aarch64-linux-musl` and `x86_64-linux-musl` without the need to install package `gcompat`. [#657](https://github.com/ged/ruby-pg/pull/657). 
From 227653d235fe5ed4a8e03ea09854a6d298426ff3 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Mon, 1 Sep 2025 19:43:39 +0200 Subject: [PATCH 116/118] Update dependencies --- ext/extconf.rb | 6 +++--- .../0001-Allow-static-linking-krb5-library.patch | 0 .../0002-unknown-command-line-option-on-clang.patch | 12 ++++++++++++ .../{3.5.1 => 3.5.2}/0001-aarch64-mingw.patch | 0 ...round-of-__builtin_setjmp-only-on-MINGW-on-.patch | 0 ...cess-buffered-SSL-read-bytes-to-support-rec.patch | 0 6 files changed, 15 insertions(+), 3 deletions(-) rename ports/patches/krb5/{1.21.3 => 1.22.1}/0001-Allow-static-linking-krb5-library.patch (100%) create mode 100644 ports/patches/krb5/1.22.1/0002-unknown-command-line-option-on-clang.patch rename ports/patches/openssl/{3.5.1 => 3.5.2}/0001-aarch64-mingw.patch (100%) rename ports/patches/postgresql/{17.5 => 17.6}/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch (100%) rename ports/patches/postgresql/{17.5 => 17.6}/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch (100%) diff --git a/ext/extconf.rb b/ext/extconf.rb index c55133dcd..431b557ce 100644 --- a/ext/extconf.rb +++ b/ext/extconf.rb @@ -27,13 +27,13 @@ gem 'mini_portile2', '~>2.1' require 'mini_portile2' - OPENSSL_VERSION = ENV['OPENSSL_VERSION'] || '3.5.1' + OPENSSL_VERSION = ENV['OPENSSL_VERSION'] || '3.5.2' OPENSSL_SOURCE_URI = "http://www.openssl.org/source/openssl-#{OPENSSL_VERSION}.tar.gz" - KRB5_VERSION = ENV['KRB5_VERSION'] || '1.21.3' + KRB5_VERSION = ENV['KRB5_VERSION'] || '1.22.1' KRB5_SOURCE_URI = "http://kerberos.org/dist/krb5/#{KRB5_VERSION[/^(\d+\.\d+)/]}/krb5-#{KRB5_VERSION}.tar.gz" - POSTGRESQL_VERSION = ENV['POSTGRESQL_VERSION'] || '17.5' + POSTGRESQL_VERSION = ENV['POSTGRESQL_VERSION'] || '17.6' POSTGRESQL_SOURCE_URI = "http://ftp.postgresql.org/pub/source/v#{POSTGRESQL_VERSION}/postgresql-#{POSTGRESQL_VERSION}.tar.bz2" class BuildRecipe < MiniPortile diff --git a/ports/patches/krb5/1.21.3/0001-Allow-static-linking-krb5-library.patch 
b/ports/patches/krb5/1.22.1/0001-Allow-static-linking-krb5-library.patch similarity index 100% rename from ports/patches/krb5/1.21.3/0001-Allow-static-linking-krb5-library.patch rename to ports/patches/krb5/1.22.1/0001-Allow-static-linking-krb5-library.patch diff --git a/ports/patches/krb5/1.22.1/0002-unknown-command-line-option-on-clang.patch b/ports/patches/krb5/1.22.1/0002-unknown-command-line-option-on-clang.patch new file mode 100644 index 000000000..7634c67fa --- /dev/null +++ b/ports/patches/krb5/1.22.1/0002-unknown-command-line-option-on-clang.patch @@ -0,0 +1,12 @@ +diff --git a/src/lib/krb5/krb/deltat.c b/src/lib/krb5/krb/deltat.c +index 03145c854..34cdf969e 100644 +--- a/lib/krb5/krb/deltat.c ++++ b/lib/krb5/krb/deltat.c +@@ -44,7 +44,6 @@ + #ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wuninitialized" +-#pragma GCC diagnostic ignored "-Wunused-but-set-variable" + #endif + + #include "k5-int.h" diff --git a/ports/patches/openssl/3.5.1/0001-aarch64-mingw.patch b/ports/patches/openssl/3.5.2/0001-aarch64-mingw.patch similarity index 100% rename from ports/patches/openssl/3.5.1/0001-aarch64-mingw.patch rename to ports/patches/openssl/3.5.2/0001-aarch64-mingw.patch diff --git a/ports/patches/postgresql/17.5/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch b/ports/patches/postgresql/17.6/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch similarity index 100% rename from ports/patches/postgresql/17.5/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch rename to ports/patches/postgresql/17.6/0001-Use-workaround-of-__builtin_setjmp-only-on-MINGW-on-.patch diff --git a/ports/patches/postgresql/17.5/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch b/ports/patches/postgresql/17.6/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch similarity index 100% rename from ports/patches/postgresql/17.5/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch rename 
to ports/patches/postgresql/17.6/0001-libpq-Process-buffered-SSL-read-bytes-to-support-rec.patch From 90a00e51d296e8614ed6815589a26d971e9f444f Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 2 Sep 2025 08:43:41 +0200 Subject: [PATCH 117/118] Update CHANGELOG for pg-1.6.2 --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5d1cf0e8..55a326d5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.6.2 [YYYY-MM-DD] Lars Kanis +## v1.6.2 [2025-09-02] Lars Kanis - Remove several absolute paths from native binaries which pointed to build directories. [#668](https://github.com/ged/ruby-pg/pull/668) - Fix bad fallback path to `pg_service.conf`. [#666](https://github.com/ged/ruby-pg/pull/666) @@ -6,6 +6,7 @@ - Raise a more descriptive error message in case of `pg_ext` LoadError. [#664](https://github.com/ged/ruby-pg/pull/664) - Freeze some constants to make them available in a Ractor context. [#660](https://github.com/ged/ruby-pg/pull/660) - Several documentation improvements. +- Update native binary gems to OpenSSL-3.5.2, krb5-1.22.1 and PostgreSQL-17.6. ## v1.6.1 [2025-08-03] Lars Kanis From c75ed8da19c356b52d83da2627295760780d3d36 Mon Sep 17 00:00:00 2001 From: Lars Kanis Date: Tue, 2 Sep 2025 08:55:13 +0200 Subject: [PATCH 118/118] Bump VERSION to 1.6.2 --- lib/pg/version.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pg/version.rb b/lib/pg/version.rb index d37ddd956..56415808b 100644 --- a/lib/pg/version.rb +++ b/lib/pg/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module PG # Library version - VERSION = '1.6.1' + VERSION = '1.6.2' end