
Commit 3ee76a3

Committed by: brian@zim.(none)

Update to add support for AZIO.
AZIO differs in that it uses mysys methods and removes all of the malloc calls.

1 parent e36df26
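
For orientation (this note and the snippet are editorial, not part of the commit): the practical difference is that the heap-allocated, opaque zlib gzFile handle is replaced by an azio_stream object the caller owns, typically embedded in ARCHIVE_SHARE or the handler, and opened with mysys-style O_* flags. Below is a minimal sketch of the before/after calling pattern, using only calls that appear in the diff; the function and parameter names are illustrative, and zlib.h plus the azlib header added by this commit are assumed to be in scope.

  /* Sketch only: how the gz* calls map onto the new az* calls. */
  static void write_pattern_sketch(const char *data_file_name,
                                   byte *buf, unsigned len)
  {
    /* Before: zlib hands back a heap-allocated, opaque handle. */
    gzFile old_writer= gzopen(data_file_name, "ab");
    gzwrite(old_writer, buf, len);
    gzflush(old_writer, Z_SYNC_FLUSH);
    gzclose(old_writer);

    /* After: the stream object is caller-owned (here on the stack; in
       ha_archive.cc it is embedded in ARCHIVE_SHARE), so the compression
       layer no longer mallocs its own handle. */
    azio_stream new_writer;
    if (azopen(&new_writer, data_file_name, O_WRONLY | O_APPEND | O_BINARY))
    {
      azwrite(&new_writer, buf, len);
      azflush(&new_writer, Z_SYNC_FLUSH);
      azclose(&new_writer);
    }
  }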

File tree

10 files changed: +1077 -59 lines


configure.in (+2, -1)

@@ -2413,7 +2413,7 @@ MYSQL_STORAGE_ENGINE(berkeley,,berkeley-db,,,,storage/bdb,,,[
   MYSQL_SETUP_BERKELEY_DB
 ])
 MYSQL_STORAGE_ENGINE(example)
-MYSQL_STORAGE_ENGINE(archive)
+MYSQL_STORAGE_ENGINE(archive,,,,,,storage/archive)
 MYSQL_STORAGE_ENGINE(csv,,,,,tina_hton,,ha_tina.o)
 MYSQL_STORAGE_ENGINE(blackhole)
 MYSQL_STORAGE_ENGINE(federated)
@@ -2535,6 +2535,7 @@ AC_SUBST(MAKE_BINARY_DISTRIBUTION_OPTIONS)
 AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
  strings/Makefile regex/Makefile storage/Makefile storage/heap/Makefile dnl
  storage/myisam/Makefile storage/myisammrg/Makefile dnl
+ storage/archive/Makefile dnl
  os2/Makefile os2/include/Makefile os2/include/sys/Makefile dnl
  man/Makefile BUILD/Makefile vio/Makefile dnl
  libmysql/Makefile client/Makefile dnl

libmysqld/Makefile.am (+1)

@@ -81,6 +81,7 @@ sql_yacc.cc sql_yacc.h: $(top_srcdir)/sql/sql_yacc.yy
 INC_LIB= $(top_builddir)/regex/libregex.a \
          $(top_builddir)/storage/myisam/libmyisam.a \
          $(top_builddir)/storage/myisammrg/libmyisammrg.a \
+         $(top_builddir)/storage/archive/libarchive.a \
          $(top_builddir)/storage/heap/libheap.a \
          $(top_builddir)/mysys/libmysys.a \
          $(top_builddir)/strings/libmystrings.a \

sql/Makefile.am (+1)

@@ -32,6 +32,7 @@ bin_PROGRAMS = mysql_tzinfo_to_sql
 gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
 LDADD = $(top_builddir)/storage/myisam/libmyisam.a \
         $(top_builddir)/storage/myisammrg/libmyisammrg.a \
+        $(top_builddir)/storage/archive/libarchive.a \
         $(top_builddir)/storage/heap/libheap.a \
         $(top_builddir)/vio/libvio.a \
         $(top_builddir)/mysys/libmysys.a \

sql/ha_archive.cc (+55, -51)

@@ -30,13 +30,13 @@
   a storage engine without indexes that could compress data very well.
   So, welcome to a completely compressed storage engine. This storage
   engine only does inserts. No replace, deletes, or updates. All reads are
-  complete table scans. Compression is done through gzip (bzip compresses
+  complete table scans. Compression is done through azip (bzip compresses
   better, but only marginally, if someone asks I could add support for
-  it too, but beaware that it costs a lot more in CPU time then gzip).
+  it too, but beaware that it costs a lot more in CPU time then azip).
 
   We keep a file pointer open for each instance of ha_archive for each read
   but for writes we keep one open file handle just for that. We flush it
-  only if we have a read occur. gzip handles compressing lots of records
+  only if we have a read occur. azip handles compressing lots of records
   at once much better then doing lots of little records between writes.
   It is possible to not lock on writes but this would then mean we couldn't
   handle bulk inserts as well (that is if someone was trying to read at
@@ -84,7 +84,7 @@
   Add truncate table command.
   Implement versioning, should be easy.
   Allow for errors, find a way to mark bad rows.
-  Talk to the gzip guys, come up with a writable format so that updates are doable
+  Talk to the azip guys, come up with a writable format so that updates are doable
   without switching to a block method.
   Add optional feature so that rows can be flushed at interval (which will cause less
   compression but may speed up ordered searches).
@@ -256,15 +256,15 @@ ha_archive::ha_archive(TABLE_SHARE *table_arg)
 /*
   This method reads the header of a datafile and returns whether or not it was successful.
 */
-int ha_archive::read_data_header(gzFile file_to_read)
+int ha_archive::read_data_header(azio_stream *file_to_read)
 {
   uchar data_buffer[DATA_BUFFER_SIZE];
   DBUG_ENTER("ha_archive::read_data_header");
 
-  if (gzrewind(file_to_read) == -1)
+  if (azrewind(file_to_read) == -1)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
 
-  if (gzread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
+  if (azread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
     DBUG_RETURN(errno ? errno : -1);
 
   DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
@@ -280,15 +280,15 @@ int ha_archive::read_data_header(gzFile file_to_read)
 /*
   This method writes out the header of a datafile and returns whether or not it was successful.
 */
-int ha_archive::write_data_header(gzFile file_to_write)
+int ha_archive::write_data_header(azio_stream *file_to_write)
 {
   uchar data_buffer[DATA_BUFFER_SIZE];
   DBUG_ENTER("ha_archive::write_data_header");
 
   data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
   data_buffer[1]= (uchar)ARCHIVE_VERSION;
 
-  if (gzwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
+  if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
       DATA_BUFFER_SIZE)
     goto error;
   DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
@@ -427,8 +427,11 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
       a gzip file that can be both read and written we keep a writer open
       that is shared amoung all open tables.
     */
-    if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
+    if (!(azopen(&(share->archive_write), share->data_file_name, O_WRONLY|O_APPEND|O_BINARY)))
+    {
+      DBUG_PRINT("info", ("Could not open archive write file"));
       share->crashed= TRUE;
+    }
     VOID(my_hash_insert(&archive_open_tables, (byte*) share));
     thr_lock_init(&share->lock);
   }
@@ -453,7 +456,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
   thr_lock_delete(&share->lock);
   VOID(pthread_mutex_destroy(&share->mutex));
   (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
-  if (gzclose(share->archive_write) == Z_ERRNO)
+  if (azclose(&(share->archive_write)))
     rc= 1;
   if (my_close(share->meta_file, MYF(0)))
     rc= 1;
@@ -494,7 +497,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
     DBUG_RETURN(HA_ERR_OUT_OF_MEM); // Not handled well by calling code!
   thr_lock_data_init(&share->lock,&lock,NULL);
 
-  if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
+  if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
   {
     if (errno == EROFS || errno == EACCES)
       DBUG_RETURN(my_errno= errno);
@@ -528,7 +531,7 @@ int ha_archive::close(void)
   DBUG_ENTER("ha_archive::close");
 
   /* First close stream */
-  if (gzclose(archive) == Z_ERRNO)
+  if (azclose(&archive))
     rc= 1;
   /* then also close share */
   rc|= free_share(share);
@@ -574,18 +577,18 @@ int ha_archive::create(const char *name, TABLE *table_arg,
     error= my_errno;
     goto error;
   }
-  if ((archive= gzdopen(create_file, "wb")) == NULL)
+  if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
   {
     error= errno;
     goto error2;
   }
-  if (write_data_header(archive))
+  if (write_data_header(&archive))
   {
     error= errno;
     goto error3;
   }
 
-  if (gzclose(archive))
+  if (azclose(&archive))
   {
     error= errno;
     goto error2;
@@ -596,8 +599,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
   DBUG_RETURN(0);
 
 error3:
-  /* We already have an error, so ignore results of gzclose. */
-  (void)gzclose(archive);
+  /* We already have an error, so ignore results of azclose. */
+  (void)azclose(&archive);
 error2:
   my_close(create_file, MYF(0));
   delete_table(name);
@@ -609,13 +612,13 @@ int ha_archive::create(const char *name, TABLE *table_arg,
 /*
   This is where the actual row is written out.
 */
-int ha_archive::real_write_row(byte *buf, gzFile writer)
+int ha_archive::real_write_row(byte *buf, azio_stream *writer)
 {
   z_off_t written;
   uint *ptr, *end;
   DBUG_ENTER("ha_archive::real_write_row");
 
-  written= gzwrite(writer, buf, table->s->reclength);
+  written= azwrite(writer, buf, table->s->reclength);
   DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
   if (!delayed_insert || !bulk_insert)
     share->dirty= TRUE;
@@ -636,7 +639,7 @@ int ha_archive::real_write_row(byte *buf, gzFile writer)
     if (size)
     {
       ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
-      written= gzwrite(writer, data_ptr, (unsigned)size);
+      written= azwrite(writer, data_ptr, (unsigned)size);
       if (written != (z_off_t)size)
         DBUG_RETURN(errno ? errno : -1);
     }
@@ -667,7 +670,7 @@ int ha_archive::write_row(byte *buf)
     table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
   share->rows_recorded++;
-  rc= real_write_row(buf, share->archive_write);
+  rc= real_write_row(buf, &(share->archive_write));
   pthread_mutex_unlock(&share->mutex);
 
   DBUG_RETURN(rc);
@@ -694,20 +697,20 @@ int ha_archive::rnd_init(bool scan)
 
   /*
     If dirty, we lock, and then reset/flush the data.
-    I found that just calling gzflush() doesn't always work.
+    I found that just calling azflush() doesn't always work.
   */
   if (share->dirty == TRUE)
   {
     pthread_mutex_lock(&share->mutex);
     if (share->dirty == TRUE)
     {
-      gzflush(share->archive_write, Z_SYNC_FLUSH);
+      azflush(&(share->archive_write), Z_SYNC_FLUSH);
       share->dirty= FALSE;
     }
     pthread_mutex_unlock(&share->mutex);
   }
 
-  if (read_data_header(archive))
+  if (read_data_header(&archive))
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
 }
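
The hunk above carries the existing flush-before-scan logic across unchanged: share->dirty is peeked without the mutex, then re-checked under it, so only one of several racing scans actually flushes the shared writer. Below is a hedged sketch of that double-checked pattern in isolation; share_sketch is an illustrative stand-in for ARCHIVE_SHARE, TRUE/FALSE come from the MySQL headers, and azflush() is used exactly as in the diff.

  /* Sketch only (editorial illustration, not from the commit). */
  struct share_sketch
  {
    pthread_mutex_t mutex;
    azio_stream     archive_write;   /* shared writer, embedded by value */
    bool            dirty;           /* unflushed rows pending           */
  };

  static void flush_if_dirty_sketch(struct share_sketch *share)
  {
    if (share->dirty)                /* cheap, unlocked peek             */
    {
      pthread_mutex_lock(&share->mutex);
      if (share->dirty)              /* re-check while holding the lock  */
      {
        azflush(&share->archive_write, Z_SYNC_FLUSH);
        share->dirty= FALSE;
      }
      pthread_mutex_unlock(&share->mutex);
    }
  }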

@@ -719,15 +722,15 @@ int ha_archive::rnd_init(bool scan)
   This is the method that is used to read a row. It assumes that the row is
   positioned where you want it.
 */
-int ha_archive::get_row(gzFile file_to_read, byte *buf)
+int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
 {
-  int read; // Bytes read, gzread() returns int
+  int read; // Bytes read, azread() returns int
   uint *ptr, *end;
   char *last;
   size_t total_blob_length= 0;
   DBUG_ENTER("ha_archive::get_row");
 
-  read= gzread(file_to_read, buf, table->s->reclength);
+  read= azread(file_to_read, buf, table->s->reclength);
   DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));
 
   if (read == Z_STREAM_ERROR)
@@ -762,7 +765,7 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
       size_t size= ((Field_blob*) table->field[*ptr])->get_length();
       if (size)
       {
-        read= gzread(file_to_read, last, size);
+        read= azread(file_to_read, last, size);
         if ((size_t) read != size)
           DBUG_RETURN(HA_ERR_END_OF_FILE);
         ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
@@ -792,8 +795,8 @@ int ha_archive::rnd_next(byte *buf)
 
   statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
                       &LOCK_status);
-  current_position= gztell(archive);
-  rc= get_row(archive, buf);
+  current_position= aztell(&archive);
+  rc= get_row(&archive, buf);
 
 
   if (rc != HA_ERR_END_OF_FILE)
@@ -830,9 +833,9 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
   statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
                       &LOCK_status);
   current_position= (z_off_t)my_get_ptr(pos, ref_length);
-  (void)gzseek(archive, current_position, SEEK_SET);
+  (void)azseek(&archive, current_position, SEEK_SET);
 
-  DBUG_RETURN(get_row(archive, buf));
+  DBUG_RETURN(get_row(&archive, buf));
 }
 
 /*
@@ -861,17 +864,17 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
 {
   DBUG_ENTER("ha_archive::optimize");
   int rc;
-  gzFile writer;
+  azio_stream writer;
   char writer_filename[FN_REFLEN];
 
   /* Flush any waiting data */
-  gzflush(share->archive_write, Z_SYNC_FLUSH);
+  azflush(&(share->archive_write), Z_SYNC_FLUSH);
 
   /* Lets create a file to contain the new data */
   fn_format(writer_filename, share->table_name, "", ARN,
             MY_REPLACE_EXT|MY_UNPACK_FILENAME);
 
-  if ((writer= gzopen(writer_filename, "wb")) == NULL)
+  if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
 
   /*
@@ -881,6 +884,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
 
   if (check_opt->flags == T_EXTEND)
   {
+    DBUG_PRINT("info", ("archive extended rebuild"));
     byte *buf;
 
     /*
@@ -897,14 +901,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
       Now we will rewind the archive file so that we are positioned at the
      start of the file.
     */
-    rc= read_data_header(archive);
+    rc= read_data_header(&archive);
 
     /*
       Assuming now error from rewinding the archive file, we now write out the
      new header for out data file.
     */
    if (!rc)
-      rc= write_data_header(writer);
+      rc= write_data_header(&writer);
 
    /*
      On success of writing out the new header, we now fetch each row and
@@ -913,9 +917,9 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
     if (!rc)
     {
       share->rows_recorded= 0;
-      while (!(rc= get_row(archive, buf)))
+      while (!(rc= get_row(&archive, buf)))
       {
-        real_write_row(buf, writer);
+        real_write_row(buf, &writer);
         share->rows_recorded++;
       }
     }
@@ -926,31 +930,31 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   }
   else
   {
+    DBUG_PRINT("info", ("archive quick rebuild"));
     /*
       The quick method is to just read the data raw, and then compress it directly.
     */
-    int read; // Bytes read, gzread() returns int
+    int read; // Bytes read, azread() returns int
     char block[IO_SIZE];
-    if (gzrewind(archive) == -1)
+    if (azrewind(&archive) == -1)
     {
       rc= HA_ERR_CRASHED_ON_USAGE;
+      DBUG_PRINT("info", ("archive HA_ERR_CRASHED_ON_USAGE"));
      goto error;
     }
 
-    while ((read= gzread(archive, block, IO_SIZE)))
-      gzwrite(writer, block, read);
+    while ((read= azread(&archive, block, IO_SIZE)))
+      azwrite(&writer, block, read);
   }
 
-  gzflush(writer, Z_SYNC_FLUSH);
-  gzclose(share->archive_write);
-  share->archive_write= writer;
+  azclose(&writer);
 
   my_rename(writer_filename,share->data_file_name,MYF(0));
 
   DBUG_RETURN(0);
 
 error:
-  gzclose(writer);
+  azclose(&writer);
 
   DBUG_RETURN(rc);
 }
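
One behavioural note visible across the optimize() hunks above: the azio version closes the temporary writer and renames it over the data file, instead of swapping it in as the share's live writer the way the gzip version did. Below is a condensed, hedged sketch of the resulting rebuild flow (editorial illustration only; names ending in _sketch and the tmp_file_name parameter are made up, the az* and my_rename calls are used as in the diff, and the row-by-row T_EXTEND path is elided).

  /* Sketch only (editorial illustration, not from the commit). */
  static int rebuild_sketch(azio_stream *archive,         /* open reader       */
                            azio_stream *shared_writer,   /* share's writer    */
                            const char *data_file_name,
                            const char *tmp_file_name,
                            bool extended)
  {
    azio_stream writer;
    char block[IO_SIZE];
    int read;

    azflush(shared_writer, Z_SYNC_FLUSH);         /* push out pending rows    */
    if (!(azopen(&writer, tmp_file_name,
                 O_CREAT | O_WRONLY | O_TRUNC | O_BINARY)))
      return HA_ERR_CRASHED_ON_USAGE;

    if (extended)
    {
      /* T_EXTEND: re-read row by row via get_row()/real_write_row() so the
         row count can be recomputed; elided here, see the hunks above. */
    }
    else
    {
      /* Quick path: pull decompressed blocks and recompress them directly,
         without interpreting individual rows. */
      if (azrewind(archive) == -1)
      {
        azclose(&writer);
        return HA_ERR_CRASHED_ON_USAGE;
      }
      while ((read= azread(archive, block, IO_SIZE)))
        azwrite(&writer, block, read);
    }

    azclose(&writer);                             /* new file is complete     */
    my_rename(tmp_file_name, data_file_name, MYF(0));
    return 0;
  }
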
@@ -1092,7 +1096,7 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
 
   thd->proc_info= "Checking table";
   /* Flush any waiting data */
-  gzflush(share->archive_write, Z_SYNC_FLUSH);
+  azflush(&(share->archive_write), Z_SYNC_FLUSH);
 
   /*
     First we create a buffer that we can use for reading rows, and can pass
@@ -1106,10 +1110,10 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
     start of the file.
   */
   if (!rc)
-    read_data_header(archive);
+    read_data_header(&archive);
 
   if (!rc)
-    while (!(rc= get_row(archive, buf)))
+    while (!(rc= get_row(&archive, buf)))
       count--;
 
   my_free((char*)buf, MYF(0));
