Skip to content
Permalink

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: go-sql-driver/mysql
Failed to load repositories. Confirm that selected base ref is valid, then try again.
Loading
base: af56fba
Choose a base ref
...
head repository: go-sql-driver/mysql
Failed to load repositories. Confirm that selected head ref is valid, then try again.
Loading
compare: dfd973a
Choose a head ref
  • 4 commits
  • 8 files changed
  • 3 contributors

Commits on Apr 21, 2025

  1. test stability improvement. (#1699)

    * ensuring performance schema is enabled when testing some performance schema results
    * do not use collations affected by MariaDB character_set_collations.
    * ensure using IANA timezone in test, since tzinfo depending on system won't have deprecated tz like "US/Central" and "US/Pacific"
    
    Co-authored-by: Diego Dupin <diego.dupin@mariadb.com>
    methane and rusher authored Apr 21, 2025

    Verified

    This commit was created on GitHub.com and signed with GitHub’s verified signature.
    Copy the full SHA
    1bee809 View commit details

Commits on Apr 22, 2025

  1. Transaction Commit/Rollback returns conn's cached error (#1702)

    If a transaction connection has a cached error, return it instead of
    ErrInvalidConn during Commit/Rollback operations.
    
    Fix #1690
    
    Co-authored-by: brad-defined <77982333+brad-defined@users.noreply.github.com>
    methane and brad-defined authored Apr 22, 2025

    Verified

    This commit was created on GitHub.com and signed with GitHub’s verified signature.
    Copy the full SHA
    ac04e5f View commit details

Commits on Apr 23, 2025

  1. backport benchmark_test (#1706)

    Backport benchmark improvements.
    methane authored Apr 23, 2025

    Verified

    This commit was created on GitHub.com and signed with GitHub’s verified signature.
    Copy the full SHA
    2ed589b View commit details

Commits on Apr 25, 2025

  1. optimize readPacket (#1707)

    Avoid unnecessary allocation.
    Although this is not a bug, this is a regression in 1.9 branch.
    This PR fixes performance regression when compression is not used.
    methane authored Apr 25, 2025

    Verified

    This commit was created on GitHub.com and signed with GitHub’s verified signature.
    Copy the full SHA
    dfd973a View commit details
Showing with 126 additions and 75 deletions.
  1. +4 −0 .github/workflows/test.yml
  2. +3 −0 AUTHORS
  3. +45 −30 benchmark_test.go
  4. +8 −10 buffer.go
  5. +6 −6 compress.go
  6. +30 −24 driver_test.go
  7. +14 −3 packets.go
  8. +16 −2 transaction.go
4 changes: 4 additions & 0 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
@@ -96,6 +96,10 @@ jobs:
run: |
go test -v '-race' '-covermode=atomic' '-coverprofile=coverage.out' -parallel 10
- name: benchmark
run: |
go test -run '^$' -bench .
- name: Send coverage
uses: shogo82148/actions-goveralls@v1
with:
3 changes: 3 additions & 0 deletions AUTHORS
Original file line number Diff line number Diff line change
@@ -25,6 +25,7 @@ Asta Xie <xiemengjun at gmail.com>
B Lamarche <blam413 at gmail.com>
Bes Dollma <bdollma@thousandeyes.com>
Bogdan Constantinescu <bog.con.bc at gmail.com>
Brad Higgins <brad at defined.net>
Brian Hendriks <brian at dolthub.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Caine Jette <jette at alum.mit.edu>
@@ -37,6 +38,7 @@ Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
Dave Protasowski <dprotaso at gmail.com>
Diego Dupin <diego.dupin at gmail.com>
Dirkjan Bussink <d.bussink at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
@@ -133,6 +135,7 @@ Ziheng Lyu <zihenglv at gmail.com>

Barracuda Networks, Inc.
Counting Ltd.
Defined Networking Inc.
DigitalOcean Inc.
Dolthub Inc.
dyves labs AG
75 changes: 45 additions & 30 deletions benchmark_test.go
Original file line number Diff line number Diff line change
@@ -46,10 +46,10 @@ func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
return stmt
}

func initDB(b *testing.B, useCompression bool, queries ...string) *sql.DB {
func initDB(b *testing.B, compress bool, queries ...string) *sql.DB {
tb := (*TB)(b)
comprStr := ""
if useCompression {
if compress {
comprStr = "&compress=1"
}
db := tb.checkDB(sql.Open(driverNameTest, dsn+comprStr))
@@ -64,16 +64,15 @@ func initDB(b *testing.B, useCompression bool, queries ...string) *sql.DB {
const concurrencyLevel = 10

func BenchmarkQuery(b *testing.B) {
benchmarkQueryHelper(b, false)
benchmarkQuery(b, false)
}

func BenchmarkQueryCompression(b *testing.B) {
benchmarkQueryHelper(b, true)
func BenchmarkQueryCompressed(b *testing.B) {
benchmarkQuery(b, true)
}

func benchmarkQueryHelper(b *testing.B, compr bool) {
func benchmarkQuery(b *testing.B, compr bool) {
tb := (*TB)(b)
b.StopTimer()
b.ReportAllocs()
db := initDB(b, compr,
"DROP TABLE IF EXISTS foo",
@@ -115,8 +114,6 @@ func benchmarkQueryHelper(b *testing.B, compr bool) {

func BenchmarkExec(b *testing.B) {
tb := (*TB)(b)
b.StopTimer()
b.ReportAllocs()
db := tb.checkDB(sql.Open(driverNameTest, dsn))
db.SetMaxIdleConns(concurrencyLevel)
defer db.Close()
@@ -128,9 +125,11 @@ func BenchmarkExec(b *testing.B) {
var wg sync.WaitGroup
wg.Add(concurrencyLevel)
defer wg.Wait()
b.StartTimer()

for i := 0; i < concurrencyLevel; i++ {
b.ReportAllocs()
b.ResetTimer()

for i := 0; i < concurrencyLevel; i++ {
go func() {
for {
if atomic.AddInt64(&remain, -1) < 0 {
@@ -158,14 +157,15 @@ func initRoundtripBenchmarks() ([]byte, int, int) {
}

func BenchmarkRoundtripTxt(b *testing.B) {
b.StopTimer()
sample, min, max := initRoundtripBenchmarks()
sampleString := string(sample)
b.ReportAllocs()
tb := (*TB)(b)
db := tb.checkDB(sql.Open(driverNameTest, dsn))
defer db.Close()
b.StartTimer()

b.ReportAllocs()
b.ResetTimer()

var result string
for i := 0; i < b.N; i++ {
length := min + i
@@ -192,15 +192,15 @@ func BenchmarkRoundtripTxt(b *testing.B) {
}

func BenchmarkRoundtripBin(b *testing.B) {
b.StopTimer()
sample, min, max := initRoundtripBenchmarks()
b.ReportAllocs()
tb := (*TB)(b)
db := tb.checkDB(sql.Open(driverNameTest, dsn))
defer db.Close()
stmt := tb.checkStmt(db.Prepare("SELECT ?"))
defer stmt.Close()
b.StartTimer()

b.ReportAllocs()
b.ResetTimer()
var result sql.RawBytes
for i := 0; i < b.N; i++ {
length := min + i
@@ -385,10 +385,9 @@ func BenchmarkQueryRawBytes(b *testing.B) {
}
}

// BenchmarkReceiveMassiveRows measures performance of receiving large number of rows.
func BenchmarkReceiveMassiveRows(b *testing.B) {
func benchmark10kRows(b *testing.B, compress bool) {
// Setup -- prepare 10000 rows.
db := initDB(b, false,
db := initDB(b, compress,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val TEXT)")
defer db.Close()
@@ -399,11 +398,14 @@ func BenchmarkReceiveMassiveRows(b *testing.B) {
b.Errorf("failed to prepare query: %v", err)
return
}

args := make([]any, 200)
for i := 1; i < 200; i+=2 {
args[i] = sval
}
for i := 0; i < 10000; i += 100 {
args := make([]any, 200)
for j := 0; j < 100; j++ {
args[j*2] = i + j
args[j*2+1] = sval
}
_, err := stmt.Exec(args...)
if err != nil {
@@ -413,30 +415,43 @@ func BenchmarkReceiveMassiveRows(b *testing.B) {
}
stmt.Close()

// Use b.Run() to skip expensive setup.
// benchmark function called several times with different b.N.
// it means heavy setup is called multiple times.
// Use b.Run() to run expensive setup only once.
// Go 1.24 introduced b.Loop() for this purpose. But we keep this
// benchmark compatible with Go 1.20.
b.Run("query", func(b *testing.B) {
b.ReportAllocs()

for i := 0; i < b.N; i++ {
rows, err := db.Query(`SELECT id, val FROM foo`)
if err != nil {
b.Errorf("failed to select: %v", err)
return
}
// rows.Scan() escapes arguments. So these variables must be defined
// before loop.
var i int
var s sql.RawBytes
for rows.Next() {
var i int
var s sql.RawBytes
err = rows.Scan(&i, &s)
if err != nil {
if err := rows.Scan(&i, &s); err != nil {
b.Errorf("failed to scan: %v", err)
_ = rows.Close()
rows.Close()
return
}
}
if err = rows.Err(); err != nil {
b.Errorf("failed to read rows: %v", err)
}
_ = rows.Close()
rows.Close()
}
})
}

// BenchmarkReceive10kRows measures performance of receiving large number of rows.
func BenchmarkReceive10kRows(b *testing.B) {
benchmark10kRows(b, false)
}

func BenchmarkReceive10kRowsCompressed(b *testing.B) {
benchmark10kRows(b, true)
}
18 changes: 8 additions & 10 deletions buffer.go
Original file line number Diff line number Diff line change
@@ -42,6 +42,11 @@ func (b *buffer) busy() bool {
return len(b.buf) > 0
}

// len returns how many bytes in the read buffer.
func (b *buffer) len() int {
return len(b.buf)
}

// fill reads into the read buffer until at least _need_ bytes are in it.
func (b *buffer) fill(need int, r readerFunc) error {
// we'll move the contents of the current buffer to dest before filling it.
@@ -86,17 +91,10 @@ func (b *buffer) fill(need int, r readerFunc) error {

// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
func (b *buffer) readNext(need int, r readerFunc) ([]byte, error) {
if len(b.buf) < need {
// refill
if err := b.fill(need, r); err != nil {
return nil, err
}
}

data := b.buf[:need]
func (b *buffer) readNext(need int) []byte {
data := b.buf[:need:need]
b.buf = b.buf[need:]
return data, nil
return data
}

// takeBuffer returns a buffer with the requested size.
12 changes: 6 additions & 6 deletions compress.go
Original file line number Diff line number Diff line change
@@ -84,26 +84,26 @@ func (c *compIO) reset() {
c.buff.Reset()
}

func (c *compIO) readNext(need int, r readerFunc) ([]byte, error) {
func (c *compIO) readNext(need int) ([]byte, error) {
for c.buff.Len() < need {
if err := c.readCompressedPacket(r); err != nil {
if err := c.readCompressedPacket(); err != nil {
return nil, err
}
}
data := c.buff.Next(need)
return data[:need:need], nil // prevent caller writes into c.buff
}

func (c *compIO) readCompressedPacket(r readerFunc) error {
header, err := c.mc.buf.readNext(7, r) // size of compressed header
func (c *compIO) readCompressedPacket() error {
header, err := c.mc.readNext(7)
if err != nil {
return err
}
_ = header[6] // bounds check hint to compiler; guaranteed by readNext

// compressed header structure
comprLength := getUint24(header[0:3])
compressionSequence := uint8(header[3])
compressionSequence := header[3]
uncompressedLength := getUint24(header[4:7])
if debug {
fmt.Printf("uncompress cmplen=%v uncomplen=%v pkt_cmp_seq=%v expected_cmp_seq=%v\n",
@@ -120,7 +120,7 @@ func (c *compIO) readCompressedPacket(r readerFunc) error {
c.mc.sequence = compressionSequence + 1
c.mc.compressSequence = c.mc.sequence

comprData, err := c.mc.buf.readNext(comprLength, r)
comprData, err := c.mc.readNext(comprLength)
if err != nil {
return err
}
54 changes: 30 additions & 24 deletions driver_test.go
Original file line number Diff line number Diff line change
@@ -1609,35 +1609,32 @@ func TestCollation(t *testing.T) {
t.Skipf("MySQL server not running on %s", netAddr)
}

defaultCollation := "utf8mb4_general_ci"
// MariaDB may override collation specified by handshake with `character_set_collations` variable.
// https://mariadb.com/kb/en/setting-character-sets-and-collations/#changing-default-collation
// https://mariadb.com/kb/en/server-system-variables/#character_set_collations
// utf8mb4_general_ci, utf8mb3_general_ci will be overridden by default MariaDB.
// Collations other than charasets default are not overridden. So utf8mb4_unicode_ci is safe.
testCollations := []string{
"", // do not set
defaultCollation, // driver default
"latin1_general_ci",
"binary",
"utf8mb4_unicode_ci",
"cp1257_bin",
}

for _, collation := range testCollations {
var expected, tdsn string
if collation != "" {
tdsn = dsn + "&collation=" + collation
expected = collation
} else {
tdsn = dsn
expected = defaultCollation
}

runTests(t, tdsn, func(dbt *DBTest) {
var got string
if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
dbt.Fatal(err)
}
t.Run(collation, func(t *testing.T) {
tdsn := dsn + "&collation=" + collation
expected := collation

if got != expected {
dbt.Fatalf("expected connection collation %s but got %s", expected, got)
}
runTests(t, tdsn, func(dbt *DBTest) {
var got string
if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
dbt.Fatal(err)
}
if got != expected {
dbt.Fatalf("expected connection collation %s but got %s", expected, got)
}
})
})
}
}
@@ -1685,16 +1682,16 @@ func TestRawBytesResultExceedsBuffer(t *testing.T) {
}

func TestTimezoneConversion(t *testing.T) {
zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
zones := []string{"UTC", "America/New_York", "Asia/Hong_Kong", "Local"}

// Regression test for timezone handling
tzTest := func(dbt *DBTest) {
// Create table
dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")

// Insert local time into database (should be converted)
usCentral, _ := time.LoadLocation("US/Central")
reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
newYorkTz, _ := time.LoadLocation("America/New_York")
reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(newYorkTz)
dbt.mustExec("INSERT INTO test VALUE (?)", reftime)

// Retrieve time from DB
@@ -1713,7 +1710,7 @@ func TestTimezoneConversion(t *testing.T) {
// Check that dates match
if reftime.Unix() != dbTime.Unix() {
dbt.Errorf("times do not match.\n")
dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
dbt.Errorf(" Now(%v)=%v\n", newYorkTz, reftime)
dbt.Errorf(" Now(UTC)=%v\n", dbTime)
}
}
@@ -3541,6 +3538,15 @@ func TestConnectionAttributes(t *testing.T) {

dbt := &DBTest{t, db}

var varName string
var varValue string
err := dbt.db.QueryRow("SHOW VARIABLES LIKE 'performance_schema'").Scan(&varName, &varValue)
if err != nil {
t.Fatalf("error: %s", err.Error())
}
if varValue != "ON" {
t.Skipf("Performance schema is not enabled. skipping")
}
queryString := "SELECT ATTR_NAME, ATTR_VALUE FROM performance_schema.session_account_connect_attrs WHERE PROCESSLIST_ID = CONNECTION_ID()"
rows := dbt.mustQuery(queryString)
defer rows.Close()
Loading