Use in-memory, on-disk-backed DB

- removed sqlite
- added stubs for MemDB
- removed sqlite3 vendor

Fixes #7

Change-Id: I97b7f274be8db5ff02d9a4e4b8f616403fd6313a
Stephen McQuay 2016-06-22 23:01:21 -07:00
parent 9d3ce56e26
commit c575677088
GPG Key ID: 1ABF428F71BAFC3D
61 changed files with 263 additions and 210223 deletions
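
The core of the change replaces the sqlite-backed `DB` with a `MemDB` that keeps all state in maps guarded by a `sync.RWMutex` and flushes a JSON snapshot to disk after every mutation. A condensed sketch of that pattern (type and field names here are simplified stand-ins, not the exact ones from db.go below):

```go
// Sketch of the storage approach this commit introduces: state lives in
// maps behind a RWMutex, and every mutation is flushed to a JSON file so
// the data survives restarts.
package main

import (
	"encoding/json"
	"os"
	"sync"
)

type memDB struct {
	filename string
	l        sync.RWMutex

	Packages map[string]string // path -> repo, simplified
}

// newMemDB loads existing state from p, or starts empty if no file exists yet.
func newMemDB(p string) (*memDB, error) {
	m := &memDB{filename: p, Packages: map[string]string{}}
	f, err := os.Open(p)
	if err != nil {
		return m, nil // no snapshot yet: fresh database
	}
	defer f.Close()
	return m, json.NewDecoder(f).Decode(m)
}

// flush writes the current state to disk; callers hold the lock.
func (m *memDB) flush() error {
	f, err := os.Create(m.filename)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(m)
}

func (m *memDB) addPackage(path, repo string) error {
	m.l.Lock()
	defer m.l.Unlock()
	m.Packages[path] = repo
	return m.flush()
}

func main() {
	db, err := newMemDB("/tmp/vain-example.json")
	if err != nil {
		panic(err)
	}
	if err := db.addPackage("example.org/foo", "https://git.example.org/foo"); err != nil {
		panic(err)
	}
}
```

In db.go below, the real MemDB holds four maps (Users, TokToEmail, Packages, Namespaces) and additionally exposes Sync, which the daemon calls from its signal handler.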


@ -17,7 +17,7 @@ import (
const window = 5 * time.Minute
func TestAdd(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -28,7 +28,7 @@ func TestAdd(t *testing.T) {
ts := httptest.NewServer(sm)
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
resp, err := http.Get(ts.URL)
@ -109,7 +109,7 @@ func TestAdd(t *testing.T) {
good := fmt.Sprintf("%s/foo", ur.Host)
if !db.PackageExists(good) {
if !db.PackageExists(path(good)) {
t.Fatalf("did not find package for %s; should have posted a valid package", good)
}
p, err := db.Package(good)
@ -163,7 +163,7 @@ func TestAdd(t *testing.T) {
}
func TestInvalidPath(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -195,7 +195,7 @@ func TestInvalidPath(t *testing.T) {
}
func TestCannotDuplicateExistingPath(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -241,7 +241,7 @@ func TestCannotDuplicateExistingPath(t *testing.T) {
}
func TestCannotAddExistingSubPath(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -253,7 +253,7 @@ func TestCannotAddExistingSubPath(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
{
@ -289,7 +289,7 @@ func TestCannotAddExistingSubPath(t *testing.T) {
}
func TestMissingRepo(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -301,7 +301,7 @@ func TestMissingRepo(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
u := fmt.Sprintf("%s/foo", ts.URL)
@ -322,7 +322,7 @@ func TestMissingRepo(t *testing.T) {
}
func TestBadJson(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -334,7 +334,7 @@ func TestBadJson(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
u := fmt.Sprintf("%s/foo", ts.URL)
@ -355,7 +355,7 @@ func TestBadJson(t *testing.T) {
}
func TestNoAuth(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -384,7 +384,7 @@ func TestNoAuth(t *testing.T) {
}
func TestBadVcs(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -396,7 +396,7 @@ func TestBadVcs(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
u := fmt.Sprintf("%s/foo", ts.URL)
@ -415,7 +415,7 @@ func TestBadVcs(t *testing.T) {
}
func TestUnsupportedMethod(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -427,7 +427,7 @@ func TestUnsupportedMethod(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
url := fmt.Sprintf("%s/foo", ts.URL)
@ -447,7 +447,7 @@ func TestUnsupportedMethod(t *testing.T) {
}
func TestDelete(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -459,8 +459,9 @@ func TestDelete(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
t.Logf("%v", tok)
if len(db.Pkgs()) != 0 {
t.Fatalf("started with something in it; got %d, want %d", len(db.Pkgs()), 0)
@ -511,7 +512,7 @@ func TestDelete(t *testing.T) {
}
func TestSingleGet(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -523,10 +524,10 @@ func TestSingleGet(t *testing.T) {
tok, err := db.addUser("sm@example.org")
if err != nil {
t.Fatalf("failure to add user: %v", err)
t.Errorf("failure to add user: %v", err)
}
ns := "foo"
ns := namespace("foo")
if err := db.NSForToken(ns, tok); err != nil {
t.Fatalf("could not initialize namespace %q for user %q: %v", ns, tok, err)
@ -565,7 +566,7 @@ func TestSingleGet(t *testing.T) {
}
func TestRegister(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -618,7 +619,7 @@ func TestRegister(t *testing.T) {
}
func TestRoundTrip(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}
@ -686,7 +687,7 @@ func TestRoundTrip(t *testing.T) {
}
func TestForgot(t *testing.T) {
db, done := testDB(t)
db, done := TestDB(t)
if db == nil {
t.Fatalf("could not create temp db")
}


@ -45,6 +45,8 @@ import (
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"mcquay.me/vain"
@ -52,7 +54,7 @@ import (
"github.com/kelseyhightower/envconfig"
)
const usage = "vaind [init] <dbname>"
const usage = "vaind <dbname>"
type config struct {
Port int
@ -72,38 +74,12 @@ type config struct {
}
func main() {
log.SetFlags(log.Lshortfile)
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "%s\n", usage)
os.Exit(1)
}
if os.Args[1] == "init" {
if len(os.Args) != 3 {
fmt.Fprintf(os.Stderr, "missing db name: %s\n", usage)
os.Exit(1)
}
db, err := vain.NewDB(os.Args[2])
if err != nil {
fmt.Fprintf(os.Stderr, "couldn't open db: %v\n", err)
os.Exit(1)
}
defer db.Close()
if err := db.Init(); err != nil {
fmt.Fprintf(os.Stderr, "problem initializing the db: %v\n", err)
os.Exit(1)
}
os.Exit(0)
}
db, err := vain.NewDB(os.Args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "couldn't open db: %v\n", err)
os.Exit(1)
}
c := &config{
Port: 4040,
EmailTimeout: 5 * time.Minute,
@ -131,10 +107,27 @@ func main() {
os.Exit(0)
}
}
log.Printf("%+v", c)
m, err := vain.NewEmail(c.From, c.SMTPHost, c.SMTPPort)
db, err := vain.NewMemDB(os.Args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "couldn't open db: %v\n", err)
os.Exit(1)
}
sigs := make(chan os.Signal)
signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
go func() {
s := <-sigs
log.Printf("signal: %+v", s)
if err := db.Sync(); err != nil {
log.Printf("problem syncing db to disk: %+v", err)
os.Exit(1)
}
os.Exit(0)
}()
m, err := vain.NewMail(c.From, c.SMTPHost, c.SMTPPort)
if err != nil {
fmt.Fprintf(os.Stderr, "problem initializing mailer: %v", err)
os.Exit(1)
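
The signal handler added above is what makes the on-disk backing safe for a long-running daemon: on SIGINT/SIGTERM the process syncs the in-memory state to disk before exiting. A standalone sketch of that hook (the db type and its Sync method are placeholders for vain.MemDB):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// db is a placeholder for *vain.MemDB; Sync stands in for the method
// that writes the JSON snapshot to disk.
type db struct{}

func (db) Sync() error { return nil }

func main() {
	store := db{}

	// Buffered channel, as the signal.Notify documentation recommends.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
	go func() {
		s := <-sigs
		log.Printf("signal: %+v", s)
		if err := store.Sync(); err != nil {
			log.Printf("problem syncing db to disk: %+v", err)
			os.Exit(1)
		}
		os.Exit(0)
	}()

	select {} // stand-in for the http.ListenAndServe call that follows in main
}
```

The sketch uses a buffered signal channel per the signal.Notify documentation; the hunk above uses an unbuffered one.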

db.go

@ -1,342 +1,241 @@
package vain
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"sync"
"time"
"github.com/jmoiron/sqlx"
// for side effects
_ "github.com/mattn/go-sqlite3"
verrors "mcquay.me/vain/errors"
vsql "mcquay.me/vain/sql"
)
// DB wraps a sqlx.DB connection and provides methods for interacting with
// a vain database.
type DB struct {
conn *sqlx.DB
// NewMemDB returns a functional MemDB.
func NewMemDB(p string) (*MemDB, error) {
m := &MemDB{
filename: p,
Users: map[Email]User{},
TokToEmail: map[Token]Email{},
Packages: map[path]Package{},
Namespaces: map[namespace]Email{},
}
// NewDB opens a sqlite3 file, sets options, and reports errors.
func NewDB(path string) (*DB, error) {
conn, err := sqlx.Open("sqlite3", fmt.Sprintf("file:%s?cache=shared&mode=rwc", path))
if _, err := conn.Exec("PRAGMA foreign_keys = ON"); err != nil {
return nil, err
}
return &DB{conn}, err
}
// Init runs the embedded sql to initialize tables.
func (db *DB) Init() error {
content, err := vsql.Asset("sql/init.sql")
f, err := os.Open(p)
if err != nil {
return err
// file doesn't exist yet
return m, nil
}
_, err = db.conn.Exec(string(content))
return err
err = json.NewDecoder(f).Decode(m)
return m, err
}
// Close the underlying connection.
func (db *DB) Close() error {
return db.conn.Close()
}
// MemDB implements an in-memory, and disk-backed database for a vain server.
type MemDB struct {
filename string
// AddPackage adds p into packages table.
func (db *DB) AddPackage(p Package) error {
_, err := db.conn.NamedExec(
"INSERT INTO packages(vcs, repo, path, ns) VALUES (:vcs, :repo, :path, :ns)",
&p,
)
return err
}
l sync.RWMutex
// RemovePackage removes package with given path
func (db *DB) RemovePackage(path string) error {
_, err := db.conn.Exec("DELETE FROM packages WHERE path = ?", path)
return err
}
Users map[Email]User
TokToEmail map[Token]Email
// Pkgs returns all packages from the database
func (db *DB) Pkgs() []Package {
r := []Package{}
rows, err := db.conn.Queryx("SELECT * FROM packages")
if err != nil {
log.Printf("%+v", err)
return nil
}
for rows.Next() {
var p Package
err = rows.StructScan(&p)
if err != nil {
log.Printf("%+v", err)
return nil
}
r = append(r, p)
}
return r
}
// PackageExists tells if a package with path is in the database.
func (db *DB) PackageExists(path string) bool {
var count int
if err := db.conn.Get(&count, "SELECT COUNT(*) FROM packages WHERE path = ?", path); err != nil {
log.Printf("%+v", err)
}
r := false
switch count {
case 1:
r = true
default:
log.Printf("unexpected count of packages matching %q: %d", path, count)
}
return r
}
// Package fetches the package associated with path.
func (db *DB) Package(path string) (Package, error) {
r := Package{}
err := db.conn.Get(&r, "SELECT * FROM packages WHERE path = ?", path)
if err == sql.ErrNoRows {
return r, verrors.HTTP{
Message: fmt.Sprintf("couldn't find package %q", path),
Code: http.StatusNotFound,
}
}
return r, err
Packages map[path]Package
Namespaces map[namespace]Email
}
// NSForToken creates an entry namespaces with a relation to the token.
func (db *DB) NSForToken(ns string, tok string) error {
var err error
txn, err := db.conn.Beginx()
if err != nil {
func (m *MemDB) NSForToken(ns namespace, tok Token) error {
m.l.Lock()
defer m.l.Unlock()
e, ok := m.TokToEmail[tok]
if !ok {
return verrors.HTTP{
Message: fmt.Sprintf("problem creating transaction: %v", err),
Code: http.StatusInternalServerError,
Message: fmt.Sprintf("User for token %q not found", tok),
Code: http.StatusNotFound,
}
}
defer func() {
if err != nil {
txn.Rollback()
if owner, ok := m.Namespaces[ns]; !ok {
m.Namespaces[ns] = e
} else {
txn.Commit()
}
}()
var count int
if err = txn.Get(&count, "SELECT COUNT(*) FROM namespaces WHERE namespaces.ns = ?", ns); err != nil {
if m.Namespaces[ns] != owner {
return verrors.HTTP{
Message: fmt.Sprintf("problem matching fetching namespaces matching %q", ns),
Code: http.StatusInternalServerError,
}
}
if count == 0 {
var email string
if err = txn.Get(&email, "SELECT email FROM users WHERE token = $1", tok); err != nil {
return verrors.HTTP{
Message: fmt.Sprintf("could not find user for token %q", tok),
Code: http.StatusInternalServerError,
}
}
if _, err = txn.Exec(
"INSERT INTO namespaces(ns, email) VALUES ($1, $2)",
ns,
email,
); err != nil {
return verrors.HTTP{
Message: fmt.Sprintf("problem inserting %q into namespaces for token %q: %v", ns, tok, err),
Code: http.StatusInternalServerError,
}
}
return err
}
if err = txn.Get(&count, "SELECT COUNT(*) FROM namespaces JOIN users ON namespaces.email = users.email WHERE users.token = ? AND namespaces.ns = ?", tok, ns); err != nil {
return verrors.HTTP{
Message: fmt.Sprintf("ns: %q, tok: %q; %v", ns, tok, err),
Code: http.StatusInternalServerError,
}
}
switch count {
case 1:
err = nil
case 0:
err = verrors.HTTP{
Message: fmt.Sprintf("not authorized against namespace %q", ns),
Code: http.StatusUnauthorized,
}
default:
}
}
return m.flush(m.filename)
}
// Package fetches the package associated with path.
func (m *MemDB) Package(pth string) (Package, error) {
m.l.RLock()
pkg, ok := m.Packages[path(pth)]
m.l.RUnlock()
var err error
if !ok {
err = verrors.HTTP{
Message: fmt.Sprintf("inconsistent db; found %d results with ns (%s) with token (%s)", count, ns, tok),
Code: http.StatusInternalServerError,
Message: fmt.Sprintf("couldn't find package %q", pth),
Code: http.StatusNotFound,
}
}
return err
return pkg, err
}
// AddPackage adds p into packages table.
func (m *MemDB) AddPackage(p Package) error {
m.l.Lock()
m.Packages[path(p.Path)] = p
m.l.Unlock()
return m.flush(m.filename)
}
// RemovePackage removes package with given path
func (m *MemDB) RemovePackage(pth path) error {
m.l.Lock()
delete(m.Packages, pth)
m.l.Unlock()
return m.flush(m.filename)
}
// PackageExists tells if a package with path is in the database.
func (m *MemDB) PackageExists(pth path) bool {
m.l.RLock()
_, ok := m.Packages[path(pth)]
m.l.RUnlock()
return ok
}
// Pkgs returns all packages from the database
func (m *MemDB) Pkgs() []Package {
ps := []Package{}
m.l.RLock()
for _, p := range m.Packages {
ps = append(ps, p)
}
m.l.RUnlock()
return ps
}
// Register adds email to the database, returning an error if there was one.
func (db *DB) Register(email string) (string, error) {
var err error
txn, err := db.conn.Beginx()
if err != nil {
return "", verrors.HTTP{
Message: fmt.Sprintf("problem creating transaction: %v", err),
Code: http.StatusInternalServerError,
}
}
defer func() {
if err != nil {
txn.Rollback()
} else {
txn.Commit()
}
}()
func (m *MemDB) Register(e Email) (Token, error) {
m.l.Lock()
defer m.l.Unlock()
var count int
if err = txn.Get(&count, "SELECT COUNT(*) FROM users WHERE email = ?", email); err != nil {
if _, ok := m.Users[e]; ok {
return "", verrors.HTTP{
Message: fmt.Sprintf("could not search for email %q in db: %v", email, err),
Code: http.StatusInternalServerError,
}
}
if count != 0 {
return "", verrors.HTTP{
Message: fmt.Sprintf("duplicate email %q", email),
Message: fmt.Sprintf("duplicate email %q", e),
Code: http.StatusConflict,
}
}
tok := FreshToken()
_, err = txn.Exec(
"INSERT INTO users(email, token, requested) VALUES (?, ?, ?)",
email,
tok,
time.Now(),
)
return tok, err
m.Users[e] = User{
Email: e,
token: tok,
Requested: time.Now(),
}
m.TokToEmail[tok] = e
return tok, m.flush(m.filename)
}
// Confirm modifies the user with the given token. Used on register confirmation.
func (db *DB) Confirm(token string) (string, error) {
var err error
txn, err := db.conn.Beginx()
if err != nil {
return "", verrors.HTTP{
Message: fmt.Sprintf("problem creating transaction: %v", err),
Code: http.StatusInternalServerError,
}
}
defer func() {
if err != nil {
txn.Rollback()
} else {
txn.Commit()
}
}()
func (m *MemDB) Confirm(tok Token) (Token, error) {
m.l.Lock()
defer m.l.Unlock()
var count int
if err = txn.Get(&count, "SELECT COUNT(*) FROM users WHERE token = ?", token); err != nil {
e, ok := m.TokToEmail[tok]
if !ok {
return "", verrors.HTTP{
Message: fmt.Sprintf("could not perform search for user with token %q in db: %v", token, err),
Code: http.StatusInternalServerError,
}
}
if count != 1 {
return "", verrors.HTTP{
Message: fmt.Sprintf("bad token: %s", token),
Message: fmt.Sprintf("bad token: %s", tok),
Code: http.StatusNotFound,
}
}
newToken := FreshToken()
_, err = txn.Exec(
"UPDATE users SET token = ?, registered = 1 WHERE token = ?",
newToken,
token,
)
if err != nil {
delete(m.TokToEmail, tok)
tok = FreshToken()
u, ok := m.Users[e]
if !ok {
return "", verrors.HTTP{
Message: fmt.Sprintf("couldn't update user with token %q: %v", token, err),
Message: fmt.Sprintf("inconsistent db; found email for token %q, but no user for email %q", tok, e),
Code: http.StatusInternalServerError,
}
}
return newToken, nil
u.token = tok
m.Users[e] = u
m.TokToEmail[tok] = e
return tok, m.flush(m.filename)
}
func (db *DB) forgot(email string, window time.Duration) (string, error) {
txn, err := db.conn.Beginx()
if err != nil {
return "", verrors.HTTP{
Message: fmt.Sprintf("problem creating transaction: %v", err),
Code: http.StatusInternalServerError,
}
}
defer func() {
if err != nil {
txn.Rollback()
} else {
txn.Commit()
}
}()
// Forgot is used to fetch a user's token. It implements rudimentary rate
// limiting.
func (m *MemDB) Forgot(e Email, window time.Duration) (Token, error) {
m.l.Lock()
defer m.l.Unlock()
out := struct {
Token string
Requested time.Time
}{}
if err = txn.Get(&out, "SELECT token, requested FROM users WHERE email = ?", email); err != nil {
u, ok := m.Users[e]
if !ok {
return "", verrors.HTTP{
Message: fmt.Sprintf("could not find email %q in db", email),
Message: fmt.Sprintf("could not find email %q in db", e),
Code: http.StatusNotFound,
}
}
if out.Requested.After(time.Now()) {
if u.Requested.After(time.Now()) {
return "", verrors.HTTP{
Message: fmt.Sprintf("rate limit hit for %q; try again in %0.2f mins", email, out.Requested.Sub(time.Now()).Minutes()),
Message: fmt.Sprintf("rate limit hit for %q; try again in %0.2f mins", u.Email, u.Requested.Sub(time.Now()).Minutes()),
Code: http.StatusTooManyRequests,
}
}
_, err = txn.Exec("UPDATE users SET requested = ? WHERE email = ?", time.Now().Add(window), email)
return u.token, nil
}
// Sync takes a lock, and flushes the data to disk.
func (m *MemDB) Sync() error {
m.l.RLock()
defer m.l.RUnlock()
return m.flush(m.filename)
}
// flush writes to disk, but expects the user to have taken the lock.
func (m *MemDB) flush(p string) error {
f, err := os.Create(p)
if err != nil {
return "", verrors.HTTP{
Message: fmt.Sprintf("could not update last requested time for %q: %v", email, err),
Code: http.StatusInternalServerError,
return err
}
}
return out.Token, nil
return json.NewEncoder(f).Encode(&m)
}
func (db *DB) addUser(email string) (string, error) {
func (m *MemDB) addUser(e Email) (Token, error) {
tok := FreshToken()
_, err := db.conn.Exec(
"INSERT INTO users(email, token, requested) VALUES (?, ?, ?)",
email,
tok,
time.Now(),
)
return tok, err
m.l.Lock()
m.Users[e] = User{
Email: e,
token: tok,
Requested: time.Now(),
}
m.TokToEmail[tok] = e
m.l.Unlock()
return tok, m.flush(m.filename)
}
func (db *DB) user(email string) (User, error) {
u := User{}
err := db.conn.Get(
&u,
"SELECT email, token, registered, requested FROM users WHERE email = ?",
email,
)
if err == sql.ErrNoRows {
return User{}, verrors.HTTP{
Message: fmt.Sprintf("could not find requested user's email: %q: %v", email, err),
func (m *MemDB) user(e Email) (User, error) {
m.l.Lock()
u, ok := m.Users[e]
m.l.Unlock()
var err error
if !ok {
err = verrors.HTTP{
Message: fmt.Sprintf("couldn't find user %q", e),
Code: http.StatusNotFound,
}
}

mail.go

@ -12,13 +12,13 @@ type Mailer interface {
Send(to mail.Address, subject, msg string) error
}
// NewEmail returns *Email struct to be able to send smtp
// NewMail returns a *Mail struct to be able to send smtp
// or an error if it can't correctly parse the email address.
func NewEmail(from, host string, port int) (*Email, error) {
func NewMail(from, host string, port int) (*Mail, error) {
if _, err := mail.ParseAddress(from); err != nil {
return nil, fmt.Errorf("can't parse an email address for 'from': %v", err)
}
r := &Email{
r := &Mail{
host: host,
port: port,
from: from,
@ -26,16 +26,16 @@ func NewEmail(from, host string, port int) (*Email, error) {
return r, nil
}
// Email stores information required to use smtp.
type Email struct {
// Mail stores information required to use smtp.
type Mail struct {
host string
port int
from string
}
// Send sends a smtp email using the host and port in the Email struct and
// Send sends a smtp email using the host and port in the Mail struct and
// returns an error if there was a problem sending the email.
func (e Email) Send(to mail.Address, subject, msg string) error {
func (e Mail) Send(to mail.Address, subject, msg string) error {
c, err := smtp.Dial(fmt.Sprintf("%s:%d", e.host, e.port))
if err != nil {
return fmt.Errorf("couldn't dial mail server: %v", err)
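
A hypothetical caller of the renamed mailer, using only the signatures visible in this hunk (the addresses, host, port, subject, and token are made up):

```go
package main

import (
	"log"
	"net/mail"

	"mcquay.me/vain"
)

func main() {
	// from/host/port are placeholders; NewMail and Send signatures are
	// taken from the mail.go hunk above.
	m, err := vain.NewMail("vain@example.org", "smtp.example.org", 25)
	if err != nil {
		log.Fatalf("problem initializing mailer: %v", err)
	}
	to := mail.Address{Address: "sm@example.org"}
	if err := m.Send(to, "vain registration", "your token: 1234-abcd"); err != nil {
		log.Fatalf("couldn't send mail: %v", err)
	}
}
```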


@ -91,7 +91,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return
}
if err := verrors.ToHTTP(s.db.NSForToken(ns, tok)); err != nil {
if err := verrors.ToHTTP(s.db.NSForToken(ns, Token(tok))); err != nil {
http.Error(w, err.Message, err.Code)
return
}
@ -130,12 +130,12 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
case "DELETE":
p := fmt.Sprintf("%s/%s", req.Host, strings.Trim(req.URL.Path, "/"))
if !s.db.PackageExists(p) {
if !s.db.PackageExists(path(p)) {
http.Error(w, fmt.Sprintf("package %q not found", p), http.StatusNotFound)
return
}
if err := s.db.RemovePackage(p); err != nil {
if err := s.db.RemovePackage(path(p)); err != nil {
http.Error(w, fmt.Sprintf("unable to delete package: %v", err), http.StatusInternalServerError)
return
}
@ -158,7 +158,7 @@ func (s *Server) register(w http.ResponseWriter, req *http.Request) {
return
}
tok, err := s.db.Register(addr.Address)
tok, err := s.db.Register(Email(addr.Address))
if err := verrors.ToHTTP(err); err != nil {
http.Error(w, err.Message, err.Code)
return
@ -194,12 +194,12 @@ func (s *Server) confirm(w http.ResponseWriter, req *http.Request) {
http.Error(w, "must provide one email parameter", http.StatusBadRequest)
return
}
tok, err := s.db.Confirm(tok)
ttok, err := s.db.Confirm(Token(tok))
if err := verrors.ToHTTP(err); err != nil {
http.Error(w, err.Message, err.Code)
return
}
fmt.Fprintf(w, "new token: %s\n", tok)
fmt.Fprintf(w, "new token: %s\n", ttok)
}
func (s *Server) forgot(w http.ResponseWriter, req *http.Request) {
@ -216,7 +216,7 @@ func (s *Server) forgot(w http.ResponseWriter, req *http.Request) {
return
}
tok, err := s.db.forgot(addr.Address, s.emailTimeout)
tok, err := s.db.Forgot(Email(addr.Address), s.emailTimeout)
if err := verrors.ToHTTP(err); err != nil {
http.Error(w, err.Message, err.Code)
return


@ -1,18 +0,0 @@
CREATE TABLE users (
email TEXT PRIMARY KEY,
token TEXT UNIQUE,
registered boolean DEFAULT 0,
requested DATETIME
);
CREATE TABLE namespaces (
ns TEXT PRIMARY KEY,
email TEXT REFERENCES users(email) ON DELETE CASCADE
);
CREATE TABLE packages (
vcs TEXT,
repo TEXT,
path TEXT UNIQUE,
ns TEXT REFERENCES namespaces(ns) ON DELETE CASCADE
);
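
For orientation, the three tables dropped above correspond to the maps MemDB now keeps. A self-contained sketch with stub types, annotating each field with the table it replaces (the annotations are an interpretation of the diff, not text from the commit):

```go
// Stub types so the annotated struct compiles on its own; the real
// definitions live in vain.go and db.go.
package vain

import (
	"sync"
	"time"
)

type (
	Email     string
	Token     string
	namespace string
	path      string
)

type User struct {
	Email      Email
	Registered bool
	Requested  time.Time
}

type Package struct {
	Repo string
	Path string
	Ns   namespace
}

// MemDB fields, annotated with the sqlite table each one replaces.
type MemDB struct {
	filename string // JSON snapshot path; replaces the sqlite file itself
	l        sync.RWMutex

	Users      map[Email]User      // users(email PRIMARY KEY, token, registered, requested)
	TokToEmail map[Token]Email     // reverse index over users.token (UNIQUE)
	Packages   map[path]Package    // packages(vcs, repo, path UNIQUE, ns)
	Namespaces map[namespace]Email // namespaces(ns PRIMARY KEY, email REFERENCES users)
}
```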


@ -1,237 +0,0 @@
// Code generated by go-bindata.
// sources:
// sql/init.sql
// DO NOT EDIT!
package sql
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _sqlInitSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x74\x8f\xc1\x4e\x84\x30\x14\x45\xf7\xfd\x8a\xb7\x64\x12\x17\xee\x5d\xd5\xf2\x26\x21\x32\xa8\x9d\x92\xc8\xb2\xc2\x0b\x12\xa0\x45\x0a\x7e\xbf\x0d\x4d\x15\xe3\xd0\x5d\x7b\x6f\x7a\xce\x15\x12\xb9\x42\x50\xfc\x31\x47\x58\x1d\xcd\x0e\x12\x06\xfe\xd0\xa8\xbb\x01\x14\xbe\x29\x78\x91\xd9\x85\xcb\x0a\x9e\xb0\xba\xdb\xb2\xc5\xf6\x64\x42\x56\x16\xd9\x6b\x89\xe1\x79\xa6\xb6\x73\x0b\xcd\xd4\xc0\xbb\xb5\x03\x69\x03\x29\x9e\x79\x99\x2b\xb8\x8f\x8d\xcf\x95\x7c\xa5\x81\xd4\x53\x55\x76\x41\x76\x7a\x60\xec\x8f\x84\xd1\x23\xb9\x49\xd7\x14\x4d\x8c\x3b\xd0\xd8\x29\x4a\x3c\xa3\xc4\x42\xe0\x35\x8c\x48\xb6\xec\x04\xcf\x85\x57\xc8\xd1\x7f\x2e\xf8\x55\xf0\xf4\x06\xcf\xa3\x7a\xdd\xfe\xd0\xbe\xea\x80\x8b\xc2\x93\xdd\x5d\x27\xbd\x7c\xfc\xdf\x1d\x05\x77\x12\xbf\x23\x12\xe3\x0e\x34\xbe\x03\x00\x00\xff\xff\xc7\xbb\x93\xa7\x7b\x01\x00\x00")
func sqlInitSqlBytes() ([]byte, error) {
return bindataRead(
_sqlInitSql,
"sql/init.sql",
)
}
func sqlInitSql() (*asset, error) {
bytes, err := sqlInitSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "sql/init.sql", size: 379, mode: os.FileMode(436), modTime: time.Unix(1461818129, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"sql/init.sql": sqlInitSql,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"sql": &bintree{nil, map[string]*bintree{
"init.sql": &bintree{sqlInitSql, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}


@ -4,13 +4,15 @@ import "time"
// Storer defines the db interface.
type Storer interface {
AddPackage(p Package) error
Confirm(token string) (string, error)
NSForToken(ns string, tok string) error
NSForToken(ns namespace, tok Token) error
Package(path string) (Package, error)
PackageExists(path string) bool
AddPackage(p Package) error
RemovePackage(pth path) error
PackageExists(pth path) bool
Pkgs() []Package
Register(email string) (string, error)
RemovePackage(path string) error
forgot(email string, window time.Duration) (string, error)
Register(e Email) (Token, error)
Confirm(tok Token) (Token, error)
Forgot(e Email, window time.Duration) (Token, error)
}
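
Because this hunk interleaves old and new method signatures, here is the Storer interface as it reads after the change, reassembled in one piece (method order approximates the diff; supporting types are the string wrappers from vain.go, and Package is trimmed to the fields visible in its hunk):

```go
package vain

import "time"

type (
	Email     string
	Token     string
	namespace string
	path      string
)

// Package is trimmed to the fields shown in the vain.go hunk below.
type Package struct {
	Repo string    `json:"repo"`
	Path string    `json:"path"`
	Ns   namespace `json:"-"`
}

// Storer defines the db interface after the move to MemDB.
type Storer interface {
	NSForToken(ns namespace, tok Token) error
	Package(path string) (Package, error)

	AddPackage(p Package) error
	RemovePackage(pth path) error
	PackageExists(pth path) bool
	Pkgs() []Package

	Register(e Email) (Token, error)
	Confirm(tok Token) (Token, error)
	Forgot(e Email, window time.Duration) (Token, error)
}
```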


@ -7,25 +7,21 @@ import (
"testing"
)
func testDB(t *testing.T) (*DB, func()) {
// TestDB returns a populated MemDB in a temp location, as well as a function
// to call at cleanup time.
func TestDB(t *testing.T) (*MemDB, func()) {
dir, err := ioutil.TempDir("", "vain-testing-")
if err != nil {
t.Fatalf("could not create tmpdir for db: %v", err)
return nil, func() {}
}
name := filepath.Join(dir, "test.db")
db, err := NewDB(name)
name := filepath.Join(dir, "test.json")
db, err := NewMemDB(name)
if err != nil {
t.Fatalf("could not create db: %v", err)
return nil, func() {}
}
if err := db.Init(); err != nil {
return nil, func() {}
}
return db, func() {
db.Close()
if err := os.RemoveAll(dir); err != nil {
t.Fatalf("could not clean up tmpdir: %v", err)
}

vain.go

@ -14,6 +14,15 @@ import (
"time"
)
// Email is a vain type for storing email addresses.
type Email string
// Token is a vain type for an api token.
type Token string
type namespace string
type path string
var vcss = map[string]bool{
"hg": true,
"git": true,
@ -39,14 +48,14 @@ type Package struct {
Repo string `json:"repo"`
Path string `json:"path"`
Ns string `json:"-"`
Ns namespace `json:"-"`
}
// User stores the information about a user including email used, their
// token, whether they have registered, and the requested timestamp
type User struct {
Email string
Token string
Email Email
token Token
Registered bool
Requested time.Time
}
@ -84,17 +93,17 @@ func Valid(p string, packages []Package) bool {
return true
}
func parseNamespace(path string) (string, error) {
func parseNamespace(path string) (namespace, error) {
path = strings.TrimLeft(path, "/")
if path == "" {
return "", errors.New("path does not contain namespace")
}
elems := strings.Split(path, "/")
return elems[0], nil
return namespace(elems[0]), nil
}
// FreshToken returns a random token string.
func FreshToken() string {
func FreshToken() Token {
buf := &bytes.Buffer{}
io.Copy(buf, io.LimitReader(rand.Reader, 6))
s := hex.EncodeToString(buf.Bytes())
@ -102,5 +111,5 @@ func FreshToken() string {
for i := 0; i < len(s)/4; i++ {
r = append(r, s[i*4:(i+1)*4])
}
return strings.Join(r, "-")
return Token(strings.Join(r, "-"))
}
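
The new defined types (Email, Token, namespace, path) are plain strings underneath, but they stop the compiler from letting a token slip into an email parameter, which is what the server.go changes above rely on when they wrap raw request values. A hypothetical illustration (the helper below is not from the commit):

```go
package main

import "fmt"

type (
	Email string
	Token string
)

// register is a made-up helper; it only accepts an Email.
func register(e Email) Token { return Token("tok-for-" + string(e)) }

func main() {
	tok := register(Email("sm@example.org"))
	fmt.Println(tok)

	// register(tok) // compile error: cannot use tok (type Token) as type Email
}
```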


@ -132,7 +132,7 @@ func TestValid(t *testing.T) {
func TestNamespaceParsing(t *testing.T) {
tests := []struct {
input string
want string
want namespace
err error
}{
{


@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
tags
environ


@ -1,23 +0,0 @@
Copyright (c) 2013, Jason Moiron
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.


@ -1,185 +0,0 @@
#sqlx
[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
sqlx is a library which provides a set of extensions on go's standard
`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
et al. all leave the underlying interfaces untouched, so that their interfaces
are a superset on the standard ones. This makes it relatively painless to
integrate existing codebases using database/sql with sqlx.
Major additional concepts are:
* Marshal rows into structs (with embedded struct support), maps, and slices
* Named parameter support including prepared statements
* `Get` and `Select` to go quickly from query to struct/slice
In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
explains how to use `database/sql` along with sqlx.
## Recent Changes
* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
This breaks backwards compatibility, but it's in a way that is trivially fixable
(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
active development currently.
More importantly, [golang bug #13905](https://github.com/golang/go/issues/13905)
makes `types.JSONText` and `types.GzippedText` _potentially unsafe_, **especially**
when used with common auto-scan sqlx idioms like `Select` and `Get`.
### Backwards Compatibility
There is no Go1-like promise of absolute stability, but I take the issue seriously
and will maintain the library in a compatible state unless vital bugs prevent me
from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
a wider API cleanup was done at the time of fixing. It's possible this will happen
in future; if it does, a git tag will be provided for users requiring the old
behavior to continue to use it until such a time as they can migrate.
## install
go get github.com/jmoiron/sqlx
## issues
Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
`Columns()` does not fully qualify column names in queries like:
```sql
SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
```
making a struct or map destination ambiguous. Use `AS` in your queries
to give columns distinct names, `rows.Scan` to scan them manually, or
`SliceScan` to get a slice of results.
## usage
Below is an example which shows some common use cases for sqlx. Check
[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
usage.
```go
package main
import (
_ "github.com/lib/pq"
"database/sql"
"github.com/jmoiron/sqlx"
"log"
)
var schema = `
CREATE TABLE person (
first_name text,
last_name text,
email text
);
CREATE TABLE place (
country text,
city text NULL,
telcode integer
)`
type Person struct {
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
Email string
}
type Place struct {
Country string
City sql.NullString
TelCode int
}
func main() {
// this Pings the database trying to connect, panics on error
// use sqlx.Open() for sql.Open() semantics
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
if err != nil {
log.Fatalln(err)
}
// exec the schema or fail; multi-statement Exec behavior varies between
// database drivers; pq will exec them all, sqlite3 won't, ymmv
db.MustExec(schema)
tx := db.MustBegin()
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
// Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
tx.Commit()
// Query the database, storing results in a []Person (wrapped in []interface{})
people := []Person{}
db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
jason, john := people[0], people[1]
fmt.Printf("%#v\n%#v", jason, john)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
// You can also get a single result, a la QueryRow
jason = Person{}
err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
fmt.Printf("%#v\n", jason)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
places := []Place{}
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
fmt.Println(err)
return
}
usa, singsing, honkers := places[0], places[1], places[2]
fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Loop through rows using only one struct
place := Place{}
rows, err := db.Queryx("SELECT * FROM place")
for rows.Next() {
err := rows.StructScan(&place)
if err != nil {
log.Fatalln(err)
}
fmt.Printf("%#v\n", place)
}
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Named queries, using `:name` as the bindvar. Automatic bindvar support
// which takes into account the dbtype based on the driverName on sqlx.Open/Connect
_, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
map[string]interface{}{
"first": "Bin",
"last": "Smuth",
"email": "bensmith@allblacks.nz",
})
// Selects Mr. Smith from the database
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
// Named queries can also use structs. Their bind names follow the same rules
// as the name -> db mapping, so struct fields are lowercased and the `db` tag
// is taken into consideration.
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
}
```


@ -1,186 +0,0 @@
package sqlx
import (
"bytes"
"errors"
"reflect"
"strconv"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Bindvar types supported by Rebind, BindMap and BindStruct.
const (
UNKNOWN = iota
QUESTION
DOLLAR
NAMED
)
// BindType returns the bindtype for a given database given a drivername.
func BindType(driverName string) int {
switch driverName {
case "postgres", "pgx":
return DOLLAR
case "mysql":
return QUESTION
case "sqlite3":
return QUESTION
case "oci8", "ora", "goracle":
return NAMED
}
return UNKNOWN
}
// FIXME: this should be able to be tolerant of escaped ?'s in queries without
// losing much speed, and should be to avoid confusion.
// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
func Rebind(bindType int, query string) string {
switch bindType {
case QUESTION, UNKNOWN:
return query
}
qb := []byte(query)
// Add space enough for 10 params before we have to allocate
rqb := make([]byte, 0, len(qb)+10)
j := 1
for _, b := range qb {
if b == '?' {
switch bindType {
case DOLLAR:
rqb = append(rqb, '$')
case NAMED:
rqb = append(rqb, ':', 'a', 'r', 'g')
}
for _, b := range strconv.Itoa(j) {
rqb = append(rqb, byte(b))
}
j++
} else {
rqb = append(rqb, b)
}
}
return string(rqb)
}
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
// much simpler and should be more resistant to odd unicode, but it is twice as
// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
// problems arise with its somewhat naive handling of unicode.
func rebindBuff(bindType int, query string) string {
if bindType != DOLLAR {
return query
}
b := make([]byte, 0, len(query))
rqb := bytes.NewBuffer(b)
j := 1
for _, r := range query {
if r == '?' {
rqb.WriteRune('$')
rqb.WriteString(strconv.Itoa(j))
j++
} else {
rqb.WriteRune(r)
}
}
return rqb.String()
}
// In expands slice values in args, returning the modified query string
// and a new arg list that can be executed by a database. The `query` should
// use the `?` bindVar. The return value uses the `?` bindVar.
func In(query string, args ...interface{}) (string, []interface{}, error) {
// argMeta stores reflect.Value and length for slices and
// the value itself for non-slice arguments
type argMeta struct {
v reflect.Value
i interface{}
length int
}
var flatArgsCount int
var anySlices bool
meta := make([]argMeta, len(args))
for i, arg := range args {
v := reflect.ValueOf(arg)
t := reflectx.Deref(v.Type())
if t.Kind() == reflect.Slice {
meta[i].length = v.Len()
meta[i].v = v
anySlices = true
flatArgsCount += meta[i].length
if meta[i].length == 0 {
return "", nil, errors.New("empty slice passed to 'in' query")
}
} else {
meta[i].i = arg
flatArgsCount++
}
}
// don't do any parsing if there aren't any slices; note that this means
// some errors that we might have caught below will not be returned.
if !anySlices {
return query, args, nil
}
newArgs := make([]interface{}, 0, flatArgsCount)
var arg, offset int
var buf bytes.Buffer
for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
if arg >= len(meta) {
// if an argument wasn't passed, lets return an error; this is
// not actually how database/sql Exec/Query works, but since we are
// creating an argument list programmatically, we want to be able
// to catch these programmer errors earlier.
return "", nil, errors.New("number of bindVars exceeds arguments")
}
argMeta := meta[arg]
arg++
// not a slice, continue.
// our questionmark will either be written before the next expansion
// of a slice or after the loop when writing the rest of the query
if argMeta.length == 0 {
offset = offset + i + 1
newArgs = append(newArgs, argMeta.i)
continue
}
// write everything up to and including our ? character
buf.WriteString(query[:offset+i+1])
newArgs = append(newArgs, argMeta.v.Index(0).Interface())
for si := 1; si < argMeta.length; si++ {
buf.WriteString(", ?")
newArgs = append(newArgs, argMeta.v.Index(si).Interface())
}
// slice the query and reset the offset. this avoids some bookkeeping for
// the write after the loop
query = query[offset+i+1:]
offset = 0
}
buf.WriteString(query)
if arg < len(meta) {
return "", nil, errors.New("number of bindVars less than number arguments")
}
return buf.String(), newArgs, nil
}


@ -1,12 +0,0 @@
// Package sqlx provides general purpose extensions to database/sql.
//
// It is intended to seamlessly wrap database/sql and provide convenience
// methods which are useful in the development of database driven applications.
// None of the underlying database/sql methods are changed. Instead all extended
// behavior is implemented through new methods defined on wrapper types.
//
// Additions include scanning into structs, named query support, rebinding
// queries for different drivers, convenient shorthands for common error handling
// and more.
//
package sqlx


@ -1,336 +0,0 @@
package sqlx
// Named Query Support
//
// * BindMap - bind query bindvars to map/struct args
// * NamedExec, NamedQuery - named query w/ struct or map
// * NamedStmt - a pre-compiled named query which is a prepared statement
//
// Internal Interfaces:
//
// * compileNamedQuery - rebind a named query, returning a query and list of names
// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
//
import (
"database/sql"
"errors"
"fmt"
"reflect"
"strconv"
"unicode"
"github.com/jmoiron/sqlx/reflectx"
)
// NamedStmt is a prepared statement that executes named queries. Prepare it
// how you would execute a NamedQuery, but pass in a struct or map when executing.
type NamedStmt struct {
Params []string
QueryString string
Stmt *Stmt
}
// Close closes the named statement.
func (n *NamedStmt) Close() error {
return n.Stmt.Close()
}
// Exec executes a named statement using the struct passed.
func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return *new(sql.Result), err
}
return n.Stmt.Exec(args...)
}
// Query executes a named statement using the struct argument, returning rows.
func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return nil, err
}
return n.Stmt.Query(args...)
}
// QueryRow executes a named statement against the database. Because sqlx cannot
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
// returns a *sqlx.Row instead.
func (n *NamedStmt) QueryRow(arg interface{}) *Row {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return &Row{err: err}
}
return n.Stmt.QueryRowx(args...)
}
// MustExec execs a NamedStmt, panicing on error
func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
res, err := n.Exec(arg)
if err != nil {
panic(err)
}
return res
}
// Queryx using this NamedStmt
func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
r, err := n.Query(arg)
if err != nil {
return nil, err
}
return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
}
// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
// an alias for QueryRow.
func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
return n.QueryRow(arg)
}
// Select using this NamedStmt
func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
rows, err := n.Queryx(arg)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// Get using this NamedStmt
func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
r := n.QueryRowx(arg)
return r.scanAny(dest, false)
}
// Unsafe creates an unsafe version of the NamedStmt
func (n *NamedStmt) Unsafe() *NamedStmt {
r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
r.Stmt.unsafe = true
return r
}
// A union interface of preparer and binder, required to be able to prepare
// named statements (as the bindtype must be determined).
type namedPreparer interface {
Preparer
binder
}
func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
bindType := BindType(p.DriverName())
q, args, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return nil, err
}
stmt, err := Preparex(p, q)
if err != nil {
return nil, err
}
return &NamedStmt{
QueryString: q,
Params: args,
Stmt: stmt,
}, nil
}
func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMapArgs(names, maparg)
}
return bindArgs(names, arg, m)
}
// private interface to generate a list of interfaces from a given struct
// type, given a list of names to pull out of the struct. Used by public
// BindStruct interface.
func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
// grab the indirected value of arg
v := reflect.ValueOf(arg)
for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
v = v.Elem()
}
fields := m.TraversalsByName(v.Type(), names)
for i, t := range fields {
if len(t) == 0 {
return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
}
val := reflectx.FieldByIndexesReadOnly(v, t)
arglist = append(arglist, val.Interface())
}
return arglist, nil
}
// like bindArgs, but for maps.
func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
for _, name := range names {
val, ok := arg[name]
if !ok {
return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
}
arglist = append(arglist, val)
}
return arglist, nil
}
// bindStruct binds a named parameter query with fields from a struct argument.
// The rules for binding field names to parameter names follow the same
// conventions as for StructScan, including obeying the `db` struct tags.
func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindArgs(names, arg, m)
if err != nil {
return "", []interface{}{}, err
}
return bound, arglist, nil
}
// bindMap binds a named parameter query with a map of arguments.
func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindMapArgs(names, args)
return bound, arglist, err
}
// -- Compilation of Named Queries
// Allow digits and letters in bind params; additionally runes are
// checked against underscores, meaning that bind params can have be
// alphanumeric with underscores. Mind the difference between unicode
// digits and numbers, where '5' is a digit but '五' is not.
var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
// FIXME: this function isn't safe for unicode named params, as a failing test
// can testify. This is not a regression but a failure of the original code
// as well. It should be modified to range over runes in a string rather than
// bytes, even though this is less convenient and slower. Hopefully the
// addition of the prepared NamedStmt (which will only do this once) will make
// up for the slightly slower ad-hoc NamedExec/NamedQuery.
// compile a NamedQuery into an unbound query (using the '?' bindvar) and
// a list of names.
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
names = make([]string, 0, 10)
rebound := make([]byte, 0, len(qs))
inName := false
last := len(qs) - 1
currentVar := 1
name := make([]byte, 0, 10)
for i, b := range qs {
// a ':' while we're in a name is an error
if b == ':' {
// if this is the second ':' in a '::' escape sequence, append a ':'
if inName && i > 0 && qs[i-1] == ':' {
rebound = append(rebound, ':')
inName = false
continue
} else if inName {
err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
return query, names, err
}
inName = true
name = []byte{}
// if we're in a name, and this is an allowed character, continue
} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {
// append the byte to the name if we are in a name and not on the last byte
name = append(name, b)
// if we're in a name and it's not an allowed character, the name is done
} else if inName {
inName = false
// if this is the final byte of the string and it is part of the name, then
// make sure to add it to the name
if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
name = append(name, b)
}
// add the string representation to the names list
names = append(names, string(name))
// add a proper bindvar for the bindType
switch bindType {
// oracle only supports named type bind vars even for positional
case NAMED:
rebound = append(rebound, ':')
rebound = append(rebound, name...)
case QUESTION, UNKNOWN:
rebound = append(rebound, '?')
case DOLLAR:
rebound = append(rebound, '$')
for _, b := range strconv.Itoa(currentVar) {
rebound = append(rebound, byte(b))
}
currentVar++
}
// add this byte to string unless it was not part of the name
if i != last {
rebound = append(rebound, b)
} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
rebound = append(rebound, b)
}
} else {
// this is a normal byte and should just go onto the rebound query
rebound = append(rebound, b)
}
}
return string(rebound), names, err
}
// BindNamed binds a struct or a map to a query with named parameters.
// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future.
func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(bindType, query, arg, mapper())
}
// Named takes a query using named parameters and an argument and
// returns a new query with a list of args that can be executed by
// a database. The return value uses the `?` bindvar.
func Named(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(QUESTION, query, arg, mapper())
}
func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMap(bindType, query, maparg)
}
return bindStruct(bindType, query, arg, m)
}
// NamedQuery binds a named query and then runs Query on the result using the
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
// map[string]interface{} types.
func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Queryx(q, args...)
}
// NamedExec uses BindStruct to get a query executable by the driver and
// then runs Exec on the result. Returns an error from the binding
// or the query excution itself.
func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Exec(q, args...)
}


@ -1,227 +0,0 @@
package sqlx
import (
"database/sql"
"testing"
)
func TestCompileQuery(t *testing.T) {
table := []struct {
Q, R, D, N string
V []string
}{
// basic test for named parameters, invalid char ',' terminating
{
Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
V: []string{"name", "age", "first", "last"},
},
// This query tests a named parameter ending the string as well as numbers
{
Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
V: []string{"name1", "name2"},
},
{
Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
V: []string{"name1", "name2"},
},
{
Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
V: []string{"first_name", "last_name"},
},
/* This unicode awareness test sadly fails, because of our byte-wise worldview.
* We could certainly iterate by Rune instead, though it's a great deal slower,
* it's probably the RightWay(tm)
{
Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
N: []string{"name", "age", "first", "last"},
},
*/
}
for _, test := range table {
qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
if err != nil {
t.Error(err)
}
if qr != test.R {
t.Errorf("expected %s, got %s", test.R, qr)
}
if len(names) != len(test.V) {
t.Errorf("expected %#v, got %#v", test.V, names)
} else {
for i, name := range names {
if name != test.V[i] {
t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
}
}
}
qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
if qd != test.D {
t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
}
qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
if qq != test.N {
t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
}
}
}
type Test struct {
t *testing.T
}
func (t Test) Error(err error, msg ...interface{}) {
if err != nil {
if len(msg) == 0 {
t.t.Error(err)
} else {
t.t.Error(msg...)
}
}
}
func (t Test) Errorf(err error, format string, args ...interface{}) {
if err != nil {
t.t.Errorf(format, args...)
}
}
func TestNamedQueries(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
test := Test{t}
var ns *NamedStmt
var err error
// Check that invalid preparations fail
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
ns, err = db.PrepareNamed("invalid sql")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
// Check closing works as anticipated
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
test.Error(err)
err = ns.Close()
test.Error(err)
ns, err = db.PrepareNamed(`
SELECT first_name, last_name, email
FROM person WHERE first_name=:first_name AND email=:email`)
test.Error(err)
// test Queryx, which uses Query under the hood
p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
rows, err := ns.Queryx(p)
test.Error(err)
for rows.Next() {
var p2 Person
rows.StructScan(&p2)
if p.FirstName != p2.FirstName {
t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
}
if p.LastName != p2.LastName {
t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
}
if p.Email != p2.Email {
t.Errorf("got %s, expected %s", p.Email, p2.Email)
}
}
// test Select
people := make([]Person, 0, 5)
err = ns.Select(&people, p)
test.Error(err)
if len(people) != 1 {
t.Errorf("got %d results, expected %d", len(people), 1)
}
if p.FirstName != people[0].FirstName {
t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
}
if p.LastName != people[0].LastName {
t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
}
if p.Email != people[0].Email {
t.Errorf("got %s, expected %s", p.Email, people[0].Email)
}
// test Exec
ns, err = db.PrepareNamed(`
INSERT INTO person (first_name, last_name, email)
VALUES (:first_name, :last_name, :email)`)
test.Error(err)
js := Person{
FirstName: "Julien",
LastName: "Savea",
Email: "jsavea@ab.co.nz",
}
_, err = ns.Exec(js)
test.Error(err)
// Make sure we can pull him out again
p2 := Person{}
db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
if p2.Email != js.Email {
t.Errorf("expected %s, got %s", js.Email, p2.Email)
}
// test Txn NamedStmts
tx := db.MustBegin()
txns := tx.NamedStmt(ns)
// We're going to add Steven in this txn
sl := Person{
FirstName: "Steven",
LastName: "Luatua",
Email: "sluatua@ab.co.nz",
}
_, err = txns.Exec(sl)
test.Error(err)
// then rollback...
tx.Rollback()
// looking for Steven after a rollback should fail
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
if err != sql.ErrNoRows {
t.Errorf("expected no rows error, got %v", err)
}
// now do the same, but commit
tx = db.MustBegin()
txns = tx.NamedStmt(ns)
_, err = txns.Exec(sl)
test.Error(err)
tx.Commit()
// looking for Steven after a Commit should succeed
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
test.Error(err)
if p2.Email != sl.Email {
t.Errorf("expected %s, got %s", sl.Email, p2.Email)
}
})
}

View File

@ -1,17 +0,0 @@
# reflectx
The sqlx package has special reflect needs. In particular, it needs to:
* be able to map a name to a field
* understand embedded structs
* understand mapping names to fields by a particular tag
* user specified name -> field mapping functions
These behaviors mimic the behavior of the standard library marshalers and of
standard Go accessors.
The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
tags in the ways that are vital to most marshalers, and they are slow.
This reflectx package extends reflect to achieve these goals.
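A minimal sketch of what that looks like in practice, assuming the current reflectx API (NewMapperFunc, FieldByName); the Inner and Outer types are invented for illustration.

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Inner struct {
	Name string `db:"name"`
}

type Outer struct {
	Inner
	Count int
}

func main() {
	m := reflectx.NewMapperFunc("db", strings.ToLower)
	v := reflect.ValueOf(Outer{Inner: Inner{Name: "x"}, Count: 3})

	// "name" comes from the db tag on the embedded struct's field,
	// "count" from the lowercasing map func.
	fmt.Println(m.FieldByName(v, "name").Interface())  // x
	fmt.Println(m.FieldByName(v, "count").Interface()) // 3
}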

View File

@ -1,371 +0,0 @@
// Package reflectx implements extensions to the standard reflect lib suitable
// for implementing marshaling and unmarshaling packages. The main Mapper type
// allows for Go-compatible named attribute access, including accessing embedded
// struct attributes and the ability to use functions and struct tags to
// customize field names.
//
package reflectx
import (
"fmt"
"reflect"
"runtime"
"strings"
"sync"
)
// A FieldInfo is a collection of metadata about a struct field.
type FieldInfo struct {
Index []int
Path string
Field reflect.StructField
Zero reflect.Value
Name string
Options map[string]string
Embedded bool
Children []*FieldInfo
Parent *FieldInfo
}
// A StructMap is an index of field metadata for a struct.
type StructMap struct {
Tree *FieldInfo
Index []*FieldInfo
Paths map[string]*FieldInfo
Names map[string]*FieldInfo
}
// GetByPath returns a *FieldInfo for a given string path.
func (f StructMap) GetByPath(path string) *FieldInfo {
return f.Paths[path]
}
// GetByTraversal returns a *FieldInfo for a given integer path. It is
// analogous to reflect.FieldByIndex.
func (f StructMap) GetByTraversal(index []int) *FieldInfo {
if len(index) == 0 {
return nil
}
tree := f.Tree
for _, i := range index {
if i >= len(tree.Children) || tree.Children[i] == nil {
return nil
}
tree = tree.Children[i]
}
return tree
}
// Mapper is a general purpose mapper of names to struct fields. A Mapper
// behaves like most marshallers, optionally obeying a field tag for name
// mapping and a function to provide a basic mapping of fields to names.
type Mapper struct {
cache map[reflect.Type]*StructMap
tagName string
tagMapFunc func(string) string
mapFunc func(string) string
mutex sync.Mutex
}
// NewMapper returns a new mapper which optionally obeys the field tag given
// by tagName. If tagName is the empty string, it is ignored.
func NewMapper(tagName string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
}
}
// NewMapperTagFunc returns a new mapper which contains a mapper for field names
// AND a mapper for tag values. This is useful for tags like json which can
// have values like "name,omitempty".
func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: mapFunc,
tagMapFunc: tagMapFunc,
}
}
// NewMapperFunc returns a new mapper which optionally obeys a field tag and
// a struct field name mapper func given by f. Tags will take precedence, but
// for any other field, the mapped name will be f(field.Name)
func NewMapperFunc(tagName string, f func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: f,
}
}
// TypeMap returns a mapping of field strings to int slices representing
// the traversal down the struct to reach the field.
func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
m.mutex.Lock()
mapping, ok := m.cache[t]
if !ok {
mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
m.cache[t] = mapping
}
m.mutex.Unlock()
return mapping
}
// FieldMap returns the mapper's mapping of field names to reflect values. Panics
// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
r := map[string]reflect.Value{}
tm := m.TypeMap(v.Type())
for tagName, fi := range tm.Names {
r[tagName] = FieldByIndexes(v, fi.Index)
}
return r
}
// FieldByName returns a field by its mapped name as a reflect.Value.
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
// Returns zero Value if the name is not found.
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
fi, ok := tm.Names[name]
if !ok {
return v
}
return FieldByIndexes(v, fi.Index)
}
// FieldsByName returns a slice of values corresponding to the slice of names
// for the value. Panics if v's Kind is not Struct or v is not Indirectable
// to a struct Kind. Returns zero Value for each name not found.
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
vals := make([]reflect.Value, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
vals = append(vals, *new(reflect.Value))
} else {
vals = append(vals, FieldByIndexes(v, fi.Index))
}
}
return vals
}
// TraversalsByName returns a slice of int slices which represent the struct
// traversals for each mapped name. Panics if t is not a struct or Indirectable
// to a struct. Returns empty int slice for each name not found.
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
t = Deref(t)
mustBe(t, reflect.Struct)
tm := m.TypeMap(t)
r := make([][]int, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
r = append(r, []int{})
} else {
r = append(r, fi.Index)
}
}
return r
}
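TraversalsByName is the piece sqlx itself leans on when matching result columns to struct fields. A small sketch, with invented User and Address types; unknown column names come back as empty traversals.

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Address struct {
	City string `db:"city"`
}

type User struct {
	ID      int     `db:"id"`
	Address Address `db:"address"`
}

func main() {
	m := reflectx.NewMapperFunc("db", strings.ToLower)
	// Column names as they might come back from a query.
	cols := []string{"id", "address.city", "missing"}
	trs := m.TraversalsByName(reflect.TypeOf(User{}), cols)
	fmt.Println(trs) // [[0] [1 0] []] (empty traversal for the unknown column)
}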
// FieldByIndexes returns a value for a particular struct traversal.
func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
// if this is a pointer, it's possible it is nil
if v.Kind() == reflect.Ptr && v.IsNil() {
alloc := reflect.New(Deref(v.Type()))
v.Set(alloc)
}
if v.Kind() == reflect.Map && v.IsNil() {
v.Set(reflect.MakeMap(v.Type()))
}
}
return v
}
// FieldByIndexesReadOnly returns a value for a particular struct traversal,
// but is not concerned with allocating nil pointers because the value is
// going to be used for reading and not setting.
func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
}
return v
}
// Deref is Indirect for reflect.Types
func Deref(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t
}
// -- helpers & utilities --
type kinder interface {
Kind() reflect.Kind
}
// mustBe checks a value against a kind, panicking with a reflect.ValueError
// if the kind isn't that which is required.
func mustBe(v kinder, expected reflect.Kind) {
k := v.Kind()
if k != expected {
panic(&reflect.ValueError{Method: methodName(), Kind: k})
}
}
// methodName returns the name of the caller of the function calling methodName
func methodName() string {
pc, _, _, _ := runtime.Caller(2)
f := runtime.FuncForPC(pc)
if f == nil {
return "unknown method"
}
return f.Name()
}
type typeQueue struct {
t reflect.Type
fi *FieldInfo
pp string // Parent path
}
// A copying append that creates a new slice each time.
func apnd(is []int, i int) []int {
x := make([]int, len(is)+1)
for p, n := range is {
x[p] = n
}
x[len(x)-1] = i
return x
}
// getMapping returns a mapping for the t type, using the tagName, mapFunc and
// tagMapFunc to determine the canonical names of fields.
func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc func(string) string) *StructMap {
m := []*FieldInfo{}
root := &FieldInfo{}
queue := []typeQueue{}
queue = append(queue, typeQueue{Deref(t), root, ""})
for len(queue) != 0 {
// pop the first item off of the queue
tq := queue[0]
queue = queue[1:]
nChildren := 0
if tq.t.Kind() == reflect.Struct {
nChildren = tq.t.NumField()
}
tq.fi.Children = make([]*FieldInfo, nChildren)
// iterate through all of its fields
for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
f := tq.t.Field(fieldPos)
fi := FieldInfo{}
fi.Field = f
fi.Zero = reflect.New(f.Type).Elem()
fi.Options = map[string]string{}
var tag, name string
if tagName != "" && strings.Contains(string(f.Tag), tagName+":") {
tag = f.Tag.Get(tagName)
name = tag
} else {
if mapFunc != nil {
name = mapFunc(f.Name)
}
}
parts := strings.Split(name, ",")
if len(parts) > 1 {
name = parts[0]
for _, opt := range parts[1:] {
kv := strings.Split(opt, "=")
if len(kv) > 1 {
fi.Options[kv[0]] = kv[1]
} else {
fi.Options[kv[0]] = ""
}
}
}
if tagMapFunc != nil {
tag = tagMapFunc(tag)
}
fi.Name = name
if tq.pp == "" || (tq.pp == "" && tag == "") {
fi.Path = fi.Name
} else {
fi.Path = fmt.Sprintf("%s.%s", tq.pp, fi.Name)
}
// if the name is "-", disabled via a tag, skip it
if name == "-" {
continue
}
// skip unexported fields
if len(f.PkgPath) != 0 && !f.Anonymous {
continue
}
// bfs search of anonymous embedded structs
if f.Anonymous {
pp := tq.pp
if tag != "" {
pp = fi.Path
}
fi.Embedded = true
fi.Index = apnd(tq.fi.Index, fieldPos)
nChildren := 0
ft := Deref(f.Type)
if ft.Kind() == reflect.Struct {
nChildren = ft.NumField()
}
fi.Children = make([]*FieldInfo, nChildren)
queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
} else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
}
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Parent = tq.fi
tq.fi.Children[fieldPos] = &fi
m = append(m, &fi)
}
}
flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
for _, fi := range flds.Index {
flds.Paths[fi.Path] = fi
if fi.Name != "" && !fi.Embedded {
flds.Names[fi.Path] = fi
}
}
return flds
}

View File

@ -1,896 +0,0 @@
package reflectx
import (
"reflect"
"strings"
"testing"
)
func ival(v reflect.Value) int {
return v.Interface().(int)
}
func TestBasic(t *testing.T) {
type Foo struct {
A int
B int
C int
}
f := Foo{1, 2, 3}
fv := reflect.ValueOf(f)
m := NewMapperFunc("", func(s string) string { return s })
v := m.FieldByName(fv, "A")
if ival(v) != f.A {
t.Errorf("Expecting %d, got %d", ival(v), f.A)
}
v = m.FieldByName(fv, "B")
if ival(v) != f.B {
t.Errorf("Expecting %d, got %d", f.B, ival(v))
}
v = m.FieldByName(fv, "C")
if ival(v) != f.C {
t.Errorf("Expecting %d, got %d", f.C, ival(v))
}
}
func TestBasicEmbedded(t *testing.T) {
type Foo struct {
A int
}
type Bar struct {
Foo // `db:""` is implied for an embedded struct
B int
C int `db:"-"`
}
type Baz struct {
A int
Bar `db:"Bar"`
}
m := NewMapperFunc("db", func(s string) string { return s })
z := Baz{}
z.A = 1
z.B = 2
z.C = 4
z.Bar.Foo.A = 3
zv := reflect.ValueOf(z)
fields := m.TypeMap(reflect.TypeOf(z))
if len(fields.Index) != 5 {
t.Errorf("Expecting 5 fields")
}
// for _, fi := range fields.Index {
// log.Println(fi)
// }
v := m.FieldByName(zv, "A")
if ival(v) != z.A {
t.Errorf("Expecting %d, got %d", z.A, ival(v))
}
v = m.FieldByName(zv, "Bar.B")
if ival(v) != z.Bar.B {
t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v))
}
v = m.FieldByName(zv, "Bar.A")
if ival(v) != z.Bar.Foo.A {
t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
}
v = m.FieldByName(zv, "Bar.C")
if _, ok := v.Interface().(int); ok {
t.Errorf("Expecting Bar.C to not exist")
}
fi := fields.GetByPath("Bar.C")
if fi != nil {
t.Errorf("Bar.C should not exist")
}
}
func TestEmbeddedSimple(t *testing.T) {
type UUID [16]byte
type MyID struct {
UUID
}
type Item struct {
ID MyID
}
z := Item{}
m := NewMapper("db")
m.TypeMap(reflect.TypeOf(z))
}
func TestBasicEmbeddedWithTags(t *testing.T) {
type Foo struct {
A int `db:"a"`
}
type Bar struct {
Foo // `db:""` is implied for an embedded struct
B int `db:"b"`
}
type Baz struct {
A int `db:"a"`
Bar // `db:""` is implied for an embedded struct
}
m := NewMapper("db")
z := Baz{}
z.A = 1
z.B = 2
z.Bar.Foo.A = 3
zv := reflect.ValueOf(z)
fields := m.TypeMap(reflect.TypeOf(z))
if len(fields.Index) != 5 {
t.Errorf("Expecting 5 fields")
}
// for _, fi := range fields.index {
// log.Println(fi)
// }
v := m.FieldByName(zv, "a")
if ival(v) != z.Bar.Foo.A { // the dominant field
t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
}
v = m.FieldByName(zv, "b")
if ival(v) != z.B {
t.Errorf("Expecting %d, got %d", z.B, ival(v))
}
}
func TestFlatTags(t *testing.T) {
m := NewMapper("db")
type Asset struct {
Title string `db:"title"`
}
type Post struct {
Author string `db:"author,required"`
Asset Asset `db:""`
}
// Post columns: (author title)
post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}}
pv := reflect.ValueOf(post)
v := m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
v = m.FieldByName(pv, "title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
}
func TestNestedStruct(t *testing.T) {
m := NewMapper("db")
type Details struct {
Active bool `db:"active"`
}
type Asset struct {
Title string `db:"title"`
Details Details `db:"details"`
}
type Post struct {
Author string `db:"author,required"`
Asset `db:"asset"`
}
// Post columns: (author asset.title asset.details.active)
post := Post{
Author: "Joe",
Asset: Asset{Title: "Hello", Details: Details{Active: true}},
}
pv := reflect.ValueOf(post)
v := m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
v = m.FieldByName(pv, "title")
if _, ok := v.Interface().(string); ok {
t.Errorf("Expecting field to not exist")
}
v = m.FieldByName(pv, "asset.title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset.details.active")
if v.Interface().(bool) != post.Asset.Details.Active {
t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool))
}
}
func TestInlineStruct(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type Employee struct {
Name string
ID int
}
type Boss Employee
type person struct {
Employee `db:"employee"`
Boss `db:"boss"`
}
// employees columns: (employee.name employee.id boss.name boss.id)
em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}}
ev := reflect.ValueOf(em)
fields := m.TypeMap(reflect.TypeOf(em))
if len(fields.Index) != 6 {
t.Errorf("Expecting 6 fields")
}
v := m.FieldByName(ev, "employee.name")
if v.Interface().(string) != em.Employee.Name {
t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string))
}
v = m.FieldByName(ev, "boss.id")
if ival(v) != em.Boss.ID {
t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v))
}
}
func TestFieldsEmbedded(t *testing.T) {
m := NewMapper("db")
type Person struct {
Name string `db:"name,size=64"`
}
type Place struct {
Name string `db:"name"`
}
type Article struct {
Title string `db:"title"`
}
type PP struct {
Person `db:"person,required"`
Place `db:",someflag"`
Article `db:",required"`
}
// PP columns: (person.name name title)
pp := PP{}
pp.Person.Name = "Peter"
pp.Place.Name = "Toronto"
pp.Article.Title = "Best city ever"
fields := m.TypeMap(reflect.TypeOf(pp))
// for i, f := range fields {
// log.Println(i, f)
// }
ppv := reflect.ValueOf(pp)
v := m.FieldByName(ppv, "person.name")
if v.Interface().(string) != pp.Person.Name {
t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string))
}
v = m.FieldByName(ppv, "name")
if v.Interface().(string) != pp.Place.Name {
t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string))
}
v = m.FieldByName(ppv, "title")
if v.Interface().(string) != pp.Article.Title {
t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string))
}
fi := fields.GetByPath("person")
if _, ok := fi.Options["required"]; !ok {
t.Errorf("Expecting required option to be set")
}
if !fi.Embedded {
t.Errorf("Expecting field to be embedded")
}
if len(fi.Index) != 1 || fi.Index[0] != 0 {
t.Errorf("Expecting index to be [0]")
}
fi = fields.GetByPath("person.name")
if fi == nil {
t.Errorf("Expecting person.name to exist")
}
if fi.Path != "person.name" {
t.Errorf("Expecting %s, got %s", "person.name", fi.Path)
}
if fi.Options["size"] != "64" {
t.Errorf("Expecting %s, got %s", "64", fi.Options["size"])
}
fi = fields.GetByTraversal([]int{1, 0})
if fi == nil {
t.Errorf("Expecting traveral to exist")
}
if fi.Path != "name" {
t.Errorf("Expecting %s, got %s", "name", fi.Path)
}
fi = fields.GetByTraversal([]int{2})
if fi == nil {
t.Errorf("Expecting traversal to exist")
}
if _, ok := fi.Options["required"]; !ok {
t.Errorf("Expecting required option to be set")
}
trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"})
if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) {
t.Errorf("Expecting traversal: %v", trs)
}
}
func TestPtrFields(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type Asset struct {
Title string
}
type Post struct {
*Asset `db:"asset"`
Author string
}
post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}}
pv := reflect.ValueOf(post)
fields := m.TypeMap(reflect.TypeOf(post))
if len(fields.Index) != 3 {
t.Errorf("Expecting 3 fields")
}
v := m.FieldByName(pv, "asset.title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
}
func TestNamedPtrFields(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type User struct {
Name string
}
type Asset struct {
Title string
Owner *User `db:"owner"`
}
type Post struct {
Author string
Asset1 *Asset `db:"asset1"`
Asset2 *Asset `db:"asset2"`
}
post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil
pv := reflect.ValueOf(post)
fields := m.TypeMap(reflect.TypeOf(post))
if len(fields.Index) != 9 {
t.Errorf("Expecting 9 fields")
}
v := m.FieldByName(pv, "asset1.title")
if v.Interface().(string) != post.Asset1.Title {
t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset1.owner.name")
if v.Interface().(string) != post.Asset1.Owner.Name {
t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string))
}
v = m.FieldByName(pv, "asset2.title")
if v.Interface().(string) != post.Asset2.Title {
t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset2.owner.name")
if v.Interface().(string) != post.Asset2.Owner.Name {
t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string))
}
v = m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
}
func TestFieldMap(t *testing.T) {
type Foo struct {
A int
B int
C int
}
f := Foo{1, 2, 3}
m := NewMapperFunc("db", strings.ToLower)
fm := m.FieldMap(reflect.ValueOf(f))
if len(fm) != 3 {
t.Errorf("Expecting %d keys, got %d", 3, len(fm))
}
if fm["a"].Interface().(int) != 1 {
t.Errorf("Expecting %d, got %d", 1, ival(fm["a"]))
}
if fm["b"].Interface().(int) != 2 {
t.Errorf("Expecting %d, got %d", 2, ival(fm["b"]))
}
if fm["c"].Interface().(int) != 3 {
t.Errorf("Expecting %d, got %d", 3, ival(fm["c"]))
}
}
func TestTagNameMapping(t *testing.T) {
type Strategy struct {
StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"`
StrategyName string
}
m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string {
if strings.Contains(value, ",") {
return strings.Split(value, ",")[0]
}
return value
})
strategy := Strategy{"1", "Alpah"}
mapping := m.TypeMap(reflect.TypeOf(strategy))
for _, key := range []string{"strategy_id", "STRATEGYNAME"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
}
func TestMapping(t *testing.T) {
type Person struct {
ID int
Name string
WearsGlasses bool `db:"wears_glasses"`
}
m := NewMapperFunc("db", strings.ToLower)
p := Person{1, "Jason", true}
mapping := m.TypeMap(reflect.TypeOf(p))
for _, key := range []string{"id", "name", "wears_glasses"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
type SportsPerson struct {
Weight int
Age int
Person
}
s := SportsPerson{Weight: 100, Age: 30, Person: p}
mapping = m.TypeMap(reflect.TypeOf(s))
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
type RugbyPlayer struct {
Position int
IsIntense bool `db:"is_intense"`
IsAllBlack bool `db:"-"`
SportsPerson
}
r := RugbyPlayer{12, true, false, s}
mapping = m.TypeMap(reflect.TypeOf(r))
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
if fi := mapping.GetByPath("isallblack"); fi != nil {
t.Errorf("Expecting to ignore `IsAllBlack` field")
}
}
func TestGetByTraversal(t *testing.T) {
type C struct {
C0 int
C1 int
}
type B struct {
B0 string
B1 *C
}
type A struct {
A0 int
A1 B
}
testCases := []struct {
Index []int
ExpectedName string
ExpectNil bool
}{
{
Index: []int{0},
ExpectedName: "A0",
},
{
Index: []int{1, 0},
ExpectedName: "B0",
},
{
Index: []int{1, 1, 1},
ExpectedName: "C1",
},
{
Index: []int{3, 4, 5},
ExpectNil: true,
},
{
Index: []int{},
ExpectNil: true,
},
{
Index: nil,
ExpectNil: true,
},
}
m := NewMapperFunc("db", func(n string) string { return n })
tm := m.TypeMap(reflect.TypeOf(A{}))
for i, tc := range testCases {
fi := tm.GetByTraversal(tc.Index)
if tc.ExpectNil {
if fi != nil {
t.Errorf("%d: expected nil, got %v", i, fi)
}
continue
}
if fi == nil {
t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName)
continue
}
if fi.Name != tc.ExpectedName {
t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name)
}
}
}
// TestMapperMethodsByName tests the Mapper methods FieldsByName and TraversalsByName
func TestMapperMethodsByName(t *testing.T) {
type C struct {
C0 string
C1 int
}
type B struct {
B0 *C `db:"B0"`
B1 C `db:"B1"`
B2 string `db:"B2"`
}
type A struct {
A0 *B `db:"A0"`
B `db:"A1"`
A2 int
a3 int
}
val := &A{
A0: &B{
B0: &C{C0: "0", C1: 1},
B1: C{C0: "2", C1: 3},
B2: "4",
},
B: B{
B0: nil,
B1: C{C0: "5", C1: 6},
B2: "7",
},
A2: 8,
}
testCases := []struct {
Name string
ExpectInvalid bool
ExpectedValue interface{}
ExpectedIndexes []int
}{
{
Name: "A0.B0.C0",
ExpectedValue: "0",
ExpectedIndexes: []int{0, 0, 0},
},
{
Name: "A0.B0.C1",
ExpectedValue: 1,
ExpectedIndexes: []int{0, 0, 1},
},
{
Name: "A0.B1.C0",
ExpectedValue: "2",
ExpectedIndexes: []int{0, 1, 0},
},
{
Name: "A0.B1.C1",
ExpectedValue: 3,
ExpectedIndexes: []int{0, 1, 1},
},
{
Name: "A0.B2",
ExpectedValue: "4",
ExpectedIndexes: []int{0, 2},
},
{
Name: "A1.B0.C0",
ExpectedValue: "",
ExpectedIndexes: []int{1, 0, 0},
},
{
Name: "A1.B0.C1",
ExpectedValue: 0,
ExpectedIndexes: []int{1, 0, 1},
},
{
Name: "A1.B1.C0",
ExpectedValue: "5",
ExpectedIndexes: []int{1, 1, 0},
},
{
Name: "A1.B1.C1",
ExpectedValue: 6,
ExpectedIndexes: []int{1, 1, 1},
},
{
Name: "A1.B2",
ExpectedValue: "7",
ExpectedIndexes: []int{1, 2},
},
{
Name: "A2",
ExpectedValue: 8,
ExpectedIndexes: []int{2},
},
{
Name: "XYZ",
ExpectInvalid: true,
ExpectedIndexes: []int{},
},
{
Name: "a3",
ExpectInvalid: true,
ExpectedIndexes: []int{},
},
}
// build the names array from the test cases
names := make([]string, len(testCases))
for i, tc := range testCases {
names[i] = tc.Name
}
m := NewMapperFunc("db", func(n string) string { return n })
v := reflect.ValueOf(val)
values := m.FieldsByName(v, names)
if len(values) != len(testCases) {
t.Errorf("expected %d values, got %d", len(testCases), len(values))
t.FailNow()
}
indexes := m.TraversalsByName(v.Type(), names)
if len(indexes) != len(testCases) {
t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes))
t.FailNow()
}
for i, val := range values {
tc := testCases[i]
traversal := indexes[i]
if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) {
t.Errorf("%d: expected %v, got %v", tc.ExpectedIndexes, traversal)
t.FailNow()
}
val = reflect.Indirect(val)
if tc.ExpectInvalid {
if val.IsValid() {
t.Errorf("%d: expected zero value, got %v", i, val)
}
continue
}
if !val.IsValid() {
t.Errorf("%d: expected valid value, got %v", i, val)
continue
}
actualValue := reflect.Indirect(val).Interface()
if !reflect.DeepEqual(tc.ExpectedValue, actualValue) {
t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue)
}
}
}
func TestFieldByIndexes(t *testing.T) {
type C struct {
C0 bool
C1 string
C2 int
C3 map[string]int
}
type B struct {
B1 C
B2 *C
}
type A struct {
A1 B
A2 *B
}
testCases := []struct {
value interface{}
indexes []int
expectedValue interface{}
readOnly bool
}{
{
value: A{
A1: B{B1: C{C0: true}},
},
indexes: []int{0, 0, 0},
expectedValue: true,
readOnly: true,
},
{
value: A{
A2: &B{B2: &C{C1: "answer"}},
},
indexes: []int{1, 1, 1},
expectedValue: "answer",
readOnly: true,
},
{
value: &A{},
indexes: []int{1, 1, 3},
expectedValue: map[string]int{},
},
}
for i, tc := range testCases {
checkResults := func(v reflect.Value) {
if tc.expectedValue == nil {
if !v.IsNil() {
t.Errorf("%d: expected nil, actual %v", i, v.Interface())
}
} else {
if !reflect.DeepEqual(tc.expectedValue, v.Interface()) {
t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface())
}
}
}
checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes))
if tc.readOnly {
checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes))
}
}
}
func TestMustBe(t *testing.T) {
typ := reflect.TypeOf(E1{})
mustBe(typ, reflect.Struct)
defer func() {
if r := recover(); r != nil {
valueErr, ok := r.(*reflect.ValueError)
if !ok {
t.Error("expected panic with *reflect.ValueError")
return
}
if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" {
t.Errorf("unexpected Method: %s", valueErr.Method)
}
if valueErr.Kind != reflect.String {
t.Errorf("unexpected Kind: %s", valueErr.Kind)
}
} else {
t.Error("expected panic")
}
}()
typ = reflect.TypeOf("string")
mustBe(typ, reflect.Struct)
t.Error("got here, didn't expect to")
}
type E1 struct {
A int
}
type E2 struct {
E1
B int
}
type E3 struct {
E2
C int
}
type E4 struct {
E3
D int
}
func BenchmarkFieldNameL1(b *testing.B) {
e4 := E4{D: 1}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.FieldByName("D")
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldNameL4(b *testing.B) {
e4 := E4{}
e4.A = 1
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.FieldByName("A")
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldPosL1(b *testing.B) {
e4 := E4{D: 1}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.Field(1)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldPosL4(b *testing.B) {
e4 := E4{}
e4.A = 1
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.Field(0)
f = f.Field(0)
f = f.Field(0)
f = f.Field(0)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldByIndexL4(b *testing.B) {
e4 := E4{}
e4.A = 1
idx := []int{0, 0, 0, 0}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := FieldByIndexes(v, idx)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}

View File

@ -1,992 +0,0 @@
package sqlx
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Although the NameMapper is convenient, in practice it should not
// be relied on except for application code. If you are writing a library
// that uses sqlx, you should be aware that the name mappings you expect
// can be overridden by your user's application.
// NameMapper is used to map column names to struct field names. By default,
// it uses strings.ToLower to lowercase struct field names. It can be set
// to whatever you want, but it is encouraged to be set before sqlx is used
// as name-to-field mappings are cached after first use on a type.
var NameMapper = strings.ToLower
var origMapper = reflect.ValueOf(NameMapper)
// Rather than creating on init, this is created when necessary so that
// importers have time to customize the NameMapper.
var mpr *reflectx.Mapper
// mapper returns a valid mapper using the configured NameMapper func.
func mapper() *reflectx.Mapper {
if mpr == nil {
mpr = reflectx.NewMapperFunc("db", NameMapper)
} else if origMapper != reflect.ValueOf(NameMapper) {
// if NameMapper has changed, create a new mapper
mpr = reflectx.NewMapperFunc("db", NameMapper)
origMapper = reflect.ValueOf(NameMapper)
}
return mpr
}
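Because type maps are cached on first use, any change to NameMapper has to happen before the first query; a brief sketch (the strings.ToUpper mapping is only illustrative).

package main

import (
	"strings"

	"github.com/jmoiron/sqlx"
)

func main() {
	// Must run before the first query: type maps are cached per type, so
	// later changes would not affect already-mapped structs.
	sqlx.NameMapper = strings.ToUpper

	// ... open the database and query as usual after this point.
}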
// isScannable takes the reflect.Type of the destination and returns
// whether or not it's Scannable. Something is scannable if:
// * it is not a struct
// * it implements sql.Scanner
// * it has no exported fields
func isScannable(t reflect.Type) bool {
if reflect.PtrTo(t).Implements(_scannerInterface) {
return true
}
if t.Kind() != reflect.Struct {
return true
}
// it's not important that we use the right mapper for this particular object,
// we're only concerned on how many exported fields this struct has
m := mapper()
if len(m.TypeMap(t).Index) == 0 {
return true
}
return false
}
// ColScanner is an interface used by MapScan and SliceScan
type ColScanner interface {
Columns() ([]string, error)
Scan(dest ...interface{}) error
Err() error
}
// Queryer is an interface used by Get and Select
type Queryer interface {
Query(query string, args ...interface{}) (*sql.Rows, error)
Queryx(query string, args ...interface{}) (*Rows, error)
QueryRowx(query string, args ...interface{}) *Row
}
// Execer is an interface used by MustExec and LoadFile
type Execer interface {
Exec(query string, args ...interface{}) (sql.Result, error)
}
// Binder is an interface for something which can bind queries (Tx, DB)
type binder interface {
DriverName() string
Rebind(string) string
BindNamed(string, interface{}) (string, []interface{}, error)
}
// Ext is a union interface which can bind, query, and exec, used by
// NamedQuery and NamedExec.
type Ext interface {
binder
Queryer
Execer
}
// Preparer is an interface used by Preparex.
type Preparer interface {
Prepare(query string) (*sql.Stmt, error)
}
// determine if any of our extensions are unsafe
func isUnsafe(i interface{}) bool {
switch v := i.(type) {
case Row:
return v.unsafe
case *Row:
return v.unsafe
case Rows:
return v.unsafe
case *Rows:
return v.unsafe
case NamedStmt:
return v.Stmt.unsafe
case *NamedStmt:
return v.Stmt.unsafe
case Stmt:
return v.unsafe
case *Stmt:
return v.unsafe
case qStmt:
return v.unsafe
case *qStmt:
return v.unsafe
case DB:
return v.unsafe
case *DB:
return v.unsafe
case Tx:
return v.unsafe
case *Tx:
return v.unsafe
case sql.Rows, *sql.Rows:
return false
default:
return false
}
}
func mapperFor(i interface{}) *reflectx.Mapper {
switch i.(type) {
case DB:
return i.(DB).Mapper
case *DB:
return i.(*DB).Mapper
case Tx:
return i.(Tx).Mapper
case *Tx:
return i.(*Tx).Mapper
default:
return mapper()
}
}
var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
// Row is a reimplementation of sql.Row in order to gain access to the underlying
// sql.Rows.Columns() data, necessary for StructScan.
type Row struct {
err error
unsafe bool
rows *sql.Rows
Mapper *reflectx.Mapper
}
// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
// underlying error from the internal rows object if it exists.
func (r *Row) Scan(dest ...interface{}) error {
if r.err != nil {
return r.err
}
// TODO(bradfitz): for now we need to defensively clone all
// []byte that the driver returned (not permitting
// *RawBytes in Rows.Scan), since we're about to close
// the Rows in our defer, when we return from this function.
// the contract with the driver.Next(...) interface is that it
// can return slices into read-only temporary memory that's
// only valid until the next Scan/Close. But the TODO is that
// for a lot of drivers, this copy will be unnecessary. We
// should provide an optional interface for drivers to
// implement to say, "don't worry, the []bytes that I return
// from Next will not be modified again." (for instance, if
// they were obtained from the network anyway) But for now we
// don't care.
defer r.rows.Close()
for _, dp := range dest {
if _, ok := dp.(*sql.RawBytes); ok {
return errors.New("sql: RawBytes isn't allowed on Row.Scan")
}
}
if !r.rows.Next() {
if err := r.rows.Err(); err != nil {
return err
}
return sql.ErrNoRows
}
err := r.rows.Scan(dest...)
if err != nil {
return err
}
// Make sure the query can be processed to completion with no errors.
if err := r.rows.Close(); err != nil {
return err
}
return nil
}
// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
// returned by Row.Scan()
func (r *Row) Columns() ([]string, error) {
if r.err != nil {
return []string{}, r.err
}
return r.rows.Columns()
}
// Err returns the error encountered while scanning.
func (r *Row) Err() error {
return r.err
}
// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
// used mostly to automatically bind named queries using the right bindvars.
type DB struct {
*sql.DB
driverName string
unsafe bool
Mapper *reflectx.Mapper
}
// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
// driverName of the original database is required for named query support.
func NewDb(db *sql.DB, driverName string) *DB {
return &DB{DB: db, driverName: driverName, Mapper: mapper()}
}
// DriverName returns the driverName passed to the Open function for this DB.
func (db *DB) DriverName() string {
return db.driverName
}
// Open is the same as sql.Open, but returns an *sqlx.DB instead.
func Open(driverName, dataSourceName string) (*DB, error) {
db, err := sql.Open(driverName, dataSourceName)
if err != nil {
return nil, err
}
return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
}
// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
func MustOpen(driverName, dataSourceName string) *DB {
db, err := Open(driverName, dataSourceName)
if err != nil {
panic(err)
}
return db
}
// MapperFunc sets a new mapper for this db using the default sqlx struct tag
// and the provided mapper function.
func (db *DB) MapperFunc(mf func(string) string) {
db.Mapper = reflectx.NewMapperFunc("db", mf)
}
// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
func (db *DB) Rebind(query string) string {
return Rebind(BindType(db.driverName), query)
}
// Unsafe returns a version of DB which will silently succeed to scan when
// columns in the SQL result have no fields in the destination struct.
// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
// safety behavior.
func (db *DB) Unsafe() *DB {
return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
}
// BindNamed binds a query using the DB driver's bindvar type.
func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
}
// NamedQuery using this DB.
func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
return NamedQuery(db, query, arg)
}
// NamedExec using this DB.
func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
return NamedExec(db, query, arg)
}
// Select using this DB.
func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
return Select(db, dest, query, args...)
}
// Get using this DB.
func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
return Get(db, dest, query, args...)
}
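A hedged end-to-end sketch of Connect, Get, and Select; the driver name, DSN, table, and Person struct are assumptions, and a real program would blank-import an actual driver.

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	// _ "github.com/lib/pq" // some SQL driver would be registered here
)

type Person struct {
	FirstName string `db:"first_name"`
	LastName  string `db:"last_name"`
}

func main() {
	// Hypothetical driver name and DSN for the sketch.
	db, err := sqlx.Connect("postgres", "dbname=example sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	var one Person
	if err := db.Get(&one, "SELECT first_name, last_name FROM person LIMIT 1"); err != nil {
		log.Fatal(err) // sql.ErrNoRows when the table is empty
	}

	var all []Person
	if err := db.Select(&all, "SELECT first_name, last_name FROM person"); err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d people, first is %s", len(all), one.FirstName)
}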
// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
// of an *sql.Tx.
func (db *DB) MustBegin() *Tx {
tx, err := db.Beginx()
if err != nil {
panic(err)
}
return tx
}
// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
func (db *DB) Beginx() (*Tx, error) {
tx, err := db.DB.Begin()
if err != nil {
return nil, err
}
return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// Queryx queries the database and returns an *sqlx.Rows.
func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
r, err := db.DB.Query(query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// QueryRowx queries the database and returns an *sqlx.Row.
func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
rows, err := db.DB.Query(query, args...)
return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
}
// MustExec (panic) runs MustExec using this database.
func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
return MustExec(db, query, args...)
}
// Preparex returns an sqlx.Stmt instead of a sql.Stmt
func (db *DB) Preparex(query string) (*Stmt, error) {
return Preparex(db, query)
}
// PrepareNamed returns an sqlx.NamedStmt
func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
return prepareNamed(db, query)
}
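PrepareNamed yields a NamedStmt that can be re-bound with different maps or structs; a sketch (query and table hypothetical), selecting a single scannable column so no struct type is needed.

package sketch

import "github.com/jmoiron/sqlx"

// firstNames prepares a named statement once and reuses it with a map argument.
func firstNames(db *sqlx.DB, last string) ([]string, error) {
	ns, err := db.PrepareNamed(`SELECT first_name FROM person WHERE last_name=:last_name`)
	if err != nil {
		return nil, err
	}
	defer ns.Close()

	var names []string
	err = ns.Select(&names, map[string]interface{}{"last_name": last})
	return names, err
}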
// Tx is an sqlx wrapper around sql.Tx with extra functionality
type Tx struct {
*sql.Tx
driverName string
unsafe bool
Mapper *reflectx.Mapper
}
// DriverName returns the driverName used by the DB which began this transaction.
func (tx *Tx) DriverName() string {
return tx.driverName
}
// Rebind a query within a transaction's bindvar type.
func (tx *Tx) Rebind(query string) string {
return Rebind(BindType(tx.driverName), query)
}
// Unsafe returns a version of Tx which will silently succeed to scan when
// columns in the SQL result have no fields in the destination struct.
func (tx *Tx) Unsafe() *Tx {
return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
}
// BindNamed binds a query within a transaction's bindvar type.
func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
}
// NamedQuery within a transaction.
func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
return NamedQuery(tx, query, arg)
}
// NamedExec a named query within a transaction.
func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
return NamedExec(tx, query, arg)
}
// Select within a transaction.
func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
return Select(tx, dest, query, args...)
}
// Queryx within a transaction.
func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
r, err := tx.Tx.Query(query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
}
// QueryRowx within a transaction.
func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
rows, err := tx.Tx.Query(query, args...)
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
}
// Get within a transaction.
func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
return Get(tx, dest, query, args...)
}
// MustExec runs MustExec within a transaction.
func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
return MustExec(tx, query, args...)
}
// Preparex a statement within a transaction.
func (tx *Tx) Preparex(query string) (*Stmt, error) {
return Preparex(tx, query)
}
// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
// stmt can be either *sql.Stmt or *sqlx.Stmt.
func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
var s *sql.Stmt
switch v := stmt.(type) {
case Stmt:
s = v.Stmt
case *Stmt:
s = v.Stmt
case sql.Stmt:
s = &v
case *sql.Stmt:
s = v
default:
panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
}
return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
}
// NamedStmt returns a version of the prepared statement which runs within a transaction.
func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
return &NamedStmt{
QueryString: stmt.QueryString,
Params: stmt.Params,
Stmt: tx.Stmtx(stmt.Stmt),
}
}
// PrepareNamed returns an sqlx.NamedStmt
func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
return prepareNamed(tx, query)
}
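Transactions mirror the DB API, so named queries work inside them unchanged; a sketch of the usual begin, exec, commit-or-rollback pattern with a hypothetical table.

package sketch

import "github.com/jmoiron/sqlx"

// insertAll writes several rows in one transaction, rolling back on the first failure.
func insertAll(db *sqlx.DB, names []string) error {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	for _, name := range names {
		if _, err := tx.NamedExec(
			`INSERT INTO person (first_name) VALUES (:first_name)`,
			map[string]interface{}{"first_name": name},
		); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}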
// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
type Stmt struct {
*sql.Stmt
unsafe bool
Mapper *reflectx.Mapper
}
// Unsafe returns a version of Stmt which will silently succeed to scan when
// columns in the SQL result have no fields in the destination struct.
func (s *Stmt) Unsafe() *Stmt {
return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
}
// Select using the prepared statement.
func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
return Select(&qStmt{s}, dest, "", args...)
}
// Get using the prepared statement.
func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
return Get(&qStmt{s}, dest, "", args...)
}
// MustExec (panic) using this statement. Note that the query portion of the error
// output will be blank, as Stmt does not expose its query.
func (s *Stmt) MustExec(args ...interface{}) sql.Result {
return MustExec(&qStmt{s}, "", args...)
}
// QueryRowx using this statement.
func (s *Stmt) QueryRowx(args ...interface{}) *Row {
qs := &qStmt{s}
return qs.QueryRowx("", args...)
}
// Queryx using this statement.
func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
qs := &qStmt{s}
return qs.Queryx("", args...)
}
// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
// implementing those interfaces and ignoring the `query` argument.
type qStmt struct{ *Stmt }
func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
return q.Stmt.Query(args...)
}
func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
r, err := q.Stmt.Query(args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
}
func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
rows, err := q.Stmt.Query(args...)
return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
}
func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
return q.Stmt.Exec(args...)
}
// Rows is a wrapper around sql.Rows which caches costly reflect operations
// during a looped StructScan
type Rows struct {
*sql.Rows
unsafe bool
Mapper *reflectx.Mapper
// these fields cache memory use for a rows during iteration w/ structScan
started bool
fields [][]int
values []interface{}
}
// SliceScan using this Rows.
func (r *Rows) SliceScan() ([]interface{}, error) {
return SliceScan(r)
}
// MapScan using this Rows.
func (r *Rows) MapScan(dest map[string]interface{}) error {
return MapScan(r, dest)
}
// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
// Use this and iterate over Rows manually when the memory load of Select() might be
// prohibitive. *Rows.StructScan caches the reflect work of matching up column
// positions to fields to avoid that overhead per scan, which means it is not safe
// to run StructScan on the same Rows instance with different struct types.
func (r *Rows) StructScan(dest interface{}) error {
v := reflect.ValueOf(dest)
if v.Kind() != reflect.Ptr {
return errors.New("must pass a pointer, not a value, to StructScan destination")
}
v = reflect.Indirect(v)
if !r.started {
columns, err := r.Columns()
if err != nil {
return err
}
m := r.Mapper
r.fields = m.TraversalsByName(v.Type(), columns)
// if we are not unsafe and are missing fields, return an error
if f, err := missingFields(r.fields); err != nil && !r.unsafe {
return fmt.Errorf("missing destination name %s", columns[f])
}
r.values = make([]interface{}, len(columns))
r.started = true
}
err := fieldsByTraversal(v, r.fields, r.values, true)
if err != nil {
return err
}
// scan into the struct field pointers and append to our results
err = r.Scan(r.values...)
if err != nil {
return err
}
return r.Err()
}
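When Select would allocate too much at once, the usual alternative is Queryx plus a StructScan loop; a sketch with a hypothetical person row type.

package sketch

import "github.com/jmoiron/sqlx"

// person is a hypothetical row type for this sketch.
type person struct {
	FirstName string `db:"first_name"`
	LastName  string `db:"last_name"`
}

// stream walks a potentially large result set one row at a time.
func stream(db *sqlx.DB, visit func(person)) error {
	rows, err := db.Queryx("SELECT first_name, last_name FROM person")
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var p person
		// The column-to-field reflect work is cached after the first scan.
		if err := rows.StructScan(&p); err != nil {
			return err
		}
		visit(p)
	}
	return rows.Err()
}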
// Connect to a database and verify with a ping.
func Connect(driverName, dataSourceName string) (*DB, error) {
db, err := Open(driverName, dataSourceName)
if err != nil {
return db, err
}
err = db.Ping()
return db, err
}
// MustConnect connects to a database and panics on error.
func MustConnect(driverName, dataSourceName string) *DB {
db, err := Connect(driverName, dataSourceName)
if err != nil {
panic(err)
}
return db
}
// Preparex prepares a statement.
func Preparex(p Preparer, query string) (*Stmt, error) {
s, err := p.Prepare(query)
if err != nil {
return nil, err
}
return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
}
// Select executes a query using the provided Queryer, and StructScans each row
// into dest, which must be a slice. If the slice elements are scannable, then
// the result set must have only one column. Otherwise, StructScan is used.
// The *sql.Rows are closed automatically.
func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
rows, err := q.Queryx(query, args...)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// Get does a QueryRow using the provided Queryer, and scans the resulting row
// to dest. If dest is scannable, the result must only have one column. Otherwise,
// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
r := q.QueryRowx(query, args...)
return r.scanAny(dest, false)
}
// LoadFile exec's every statement in a file (as a single call to Exec).
// LoadFile may return a nil *sql.Result if errors are encountered locating or
// reading the file at path. LoadFile reads the entire file into memory, so it
// is not suitable for loading large data dumps, but can be useful for initializing
// schemas or loading indexes.
//
// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
// this by requiring something with DriverName() and then attempting to split the
// queries will be difficult to get right, and its current driver-specific behavior
// is deemed at least not complex in its incorrectness.
func LoadFile(e Execer, path string) (*sql.Result, error) {
realpath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
contents, err := ioutil.ReadFile(realpath)
if err != nil {
return nil, err
}
res, err := e.Exec(string(contents))
return &res, err
}
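A small sketch of applying a schema with LoadFile; the schema.sql path is hypothetical, and, per the caveat above, multi-statement files may not work with every driver.

package sketch

import "github.com/jmoiron/sqlx"

// applySchema loads a hypothetical schema.sql as a single Exec call.
func applySchema(db *sqlx.DB) error {
	_, err := sqlx.LoadFile(db, "schema.sql")
	return err
}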
// MustExec execs the query using e and panics if there was an error.
func MustExec(e Execer, query string, args ...interface{}) sql.Result {
res, err := e.Exec(query, args...)
if err != nil {
panic(err)
}
return res
}
// SliceScan using this Rows.
func (r *Row) SliceScan() ([]interface{}, error) {
return SliceScan(r)
}
// MapScan using this Rows.
func (r *Row) MapScan(dest map[string]interface{}) error {
return MapScan(r, dest)
}
func (r *Row) scanAny(dest interface{}, structOnly bool) error {
if r.err != nil {
return r.err
}
defer r.rows.Close()
v := reflect.ValueOf(dest)
if v.Kind() != reflect.Ptr {
return errors.New("must pass a pointer, not a value, to StructScan destination")
}
if v.IsNil() {
return errors.New("nil pointer passed to StructScan destination")
}
base := reflectx.Deref(v.Type())
scannable := isScannable(base)
if structOnly && scannable {
return structOnlyError(base)
}
columns, err := r.Columns()
if err != nil {
return err
}
if scannable && len(columns) > 1 {
return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
}
if scannable {
return r.Scan(dest)
}
m := r.Mapper
fields := m.TraversalsByName(v.Type(), columns)
// if we are not unsafe and are missing fields, return an error
if f, err := missingFields(fields); err != nil && !r.unsafe {
return fmt.Errorf("missing destination name %s", columns[f])
}
values := make([]interface{}, len(columns))
err = fieldsByTraversal(v, fields, values, true)
if err != nil {
return err
}
// scan into the struct field pointers and append to our results
return r.Scan(values...)
}
// StructScan a single Row into dest.
func (r *Row) StructScan(dest interface{}) error {
return r.scanAny(dest, true)
}
// SliceScan a row, returning a []interface{} with values similar to MapScan.
// This function is primarily intended for use where the number of columns
// is not known. Because you can pass an []interface{} directly to Scan,
// it's recommended that you do that as it will not have to allocate new
// slices per row.
func SliceScan(r ColScanner) ([]interface{}, error) {
// ignore r.started, since we needn't use reflect for anything.
columns, err := r.Columns()
if err != nil {
return []interface{}{}, err
}
values := make([]interface{}, len(columns))
for i := range values {
values[i] = new(interface{})
}
err = r.Scan(values...)
if err != nil {
return values, err
}
for i := range columns {
values[i] = *(values[i].(*interface{}))
}
return values, r.Err()
}
// MapScan scans a single Row into the dest map[string]interface{}.
// Use this to get results for SQL that might not be under your control
// (for instance, if you're building an interface for an SQL server that
// executes SQL from input). Please do not use this as a primary interface!
// This will modify the map sent to it in place, so reuse the same map with
// care. Columns which occur more than once in the result will overwrite
// each other!
func MapScan(r ColScanner, dest map[string]interface{}) error {
// ignore r.started, since we needn't use reflect for anything.
columns, err := r.Columns()
if err != nil {
return err
}
values := make([]interface{}, len(columns))
for i := range values {
values[i] = new(interface{})
}
err = r.Scan(values...)
if err != nil {
return err
}
for i, column := range columns {
dest[column] = *(values[i].(*interface{}))
}
return r.Err()
}
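MapScan (and SliceScan) are the escape hatch when the column set is not known up front; a sketch that collects arbitrary query results as column-to-value maps.

package sketch

import "github.com/jmoiron/sqlx"

// dump collects every row of an arbitrary query as a column-name -> value map.
func dump(db *sqlx.DB, query string, args ...interface{}) ([]map[string]interface{}, error) {
	rows, err := db.Queryx(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var out []map[string]interface{}
	for rows.Next() {
		m := map[string]interface{}{}
		if err := rows.MapScan(m); err != nil {
			return nil, err
		}
		out = append(out, m)
	}
	return out, rows.Err()
}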
type rowsi interface {
Close() error
Columns() ([]string, error)
Err() error
Next() bool
Scan(...interface{}) error
}
// structOnlyError returns an error appropriate for type when a non-scannable
// struct is expected but something else is given
func structOnlyError(t reflect.Type) error {
isStruct := t.Kind() == reflect.Struct
isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
if !isStruct {
return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
}
if isScanner {
return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
}
return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
}
// scanAll scans all rows into a destination, which must be a slice of any
// type. If the destination slice type is a Struct, then StructScan will be
// used on each row. If the destination is some other kind of base type, then
// each row must only have one column which can scan into that type. This
// allows you to do something like:
//
// rows, _ := db.Query("select id from people;")
// var ids []int
// scanAll(rows, &ids, false)
//
// and ids will be a list of the id results. I realize that this is a desirable
// interface to expose to users, but for now it will only be exposed via changes
// to `Get` and `Select`. The reason that this has been implemented like this is
// this is the only way to not duplicate reflect work in the new API while
// maintaining backwards compatibility.
func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
var v, vp reflect.Value
value := reflect.ValueOf(dest)
// json.Unmarshal returns errors for these
if value.Kind() != reflect.Ptr {
return errors.New("must pass a pointer, not a value, to StructScan destination")
}
if value.IsNil() {
return errors.New("nil pointer passed to StructScan destination")
}
direct := reflect.Indirect(value)
slice, err := baseType(value.Type(), reflect.Slice)
if err != nil {
return err
}
isPtr := slice.Elem().Kind() == reflect.Ptr
base := reflectx.Deref(slice.Elem())
scannable := isScannable(base)
if structOnly && scannable {
return structOnlyError(base)
}
columns, err := rows.Columns()
if err != nil {
return err
}
// if it's a base type make sure it only has 1 column; if not return an error
if scannable && len(columns) > 1 {
return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
}
if !scannable {
var values []interface{}
var m *reflectx.Mapper
switch rows.(type) {
case *Rows:
m = rows.(*Rows).Mapper
default:
m = mapper()
}
fields := m.TraversalsByName(base, columns)
// if we are not unsafe and are missing fields, return an error
if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
return fmt.Errorf("missing destination name %s", columns[f])
}
values = make([]interface{}, len(columns))
for rows.Next() {
// create a new struct type (which returns PtrTo) and indirect it
vp = reflect.New(base)
v = reflect.Indirect(vp)
			err = fieldsByTraversal(v, fields, values, true)
			if err != nil {
				return err
			}
			// scan into the struct field pointers and append to our results
			err = rows.Scan(values...)
			if err != nil {
				return err
			}
if isPtr {
direct.Set(reflect.Append(direct, vp))
} else {
direct.Set(reflect.Append(direct, v))
}
}
} else {
for rows.Next() {
vp = reflect.New(base)
			err = rows.Scan(vp.Interface())
			if err != nil {
				return err
			}
			// append
if isPtr {
direct.Set(reflect.Append(direct, vp))
} else {
direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
}
}
}
return rows.Err()
}
// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
// it doesn't really feel like it's named properly. There is an incongruity
// between this and the way that StructScan (which might better be ScanStruct
// anyway) works on a rows object.
// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
// StructScan will scan in the entire rows result, so if you do not want to
// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
func StructScan(rows rowsi, dest interface{}) error {
return scanAll(rows, dest, true)
}
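// exampleStructScan is an illustrative sketch, not part of the original sqlx
// source; the person type and query are hypothetical. StructScan drains the
// entire result set into the destination slice, so it suits small results.
func exampleStructScan(db *sql.DB) error {
	type person struct {
		ID   int    `db:"id"`
		Name string `db:"name"`
	}
	rows, err := db.Query("SELECT id, name FROM person")
	if err != nil {
		return err
	}
	defer rows.Close()
	var people []person
	return StructScan(rows, &people)
}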
// reflect helpers
func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
t = reflectx.Deref(t)
if t.Kind() != expected {
return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
}
return t, nil
}
// fieldsByTraversal fills a values interface with fields from the passed value based
// on the traversals in traversals. If ptrs is true, return addresses instead of values.
// We write this instead of using FieldsByName to save allocations and map lookups
// when iterating over many rows. Empty traversals will get an interface pointer.
// Because of the necessity of requesting ptrs or values, it's considered a bit too
// specialized for inclusion in reflectx itself.
func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
v = reflect.Indirect(v)
if v.Kind() != reflect.Struct {
return errors.New("argument not a struct")
}
for i, traversal := range traversals {
if len(traversal) == 0 {
values[i] = new(interface{})
continue
}
f := reflectx.FieldByIndexes(v, traversal)
if ptrs {
values[i] = f.Addr().Interface()
} else {
values[i] = f.Interface()
}
}
return nil
}
func missingFields(traversals [][]int) (field int, err error) {
	for i, t := range traversals {
if len(t) == 0 {
return i, errors.New("missing field")
}
}
return 0, nil
}

File diff suppressed because it is too large

View File

@@ -1,5 +0,0 @@
# types
The types package provides some useful types which implement the `sql.Scanner`
and `driver.Valuer` interfaces, suitable for use as scan and value targets with
database/sql.

View File

@@ -1,106 +0,0 @@
package types
import (
"bytes"
"compress/gzip"
"database/sql/driver"
"encoding/json"
"errors"
"io/ioutil"
)
// GzippedText is a []byte which transparently gzips data being submitted to
// a database and ungzips data being Scanned from a database.
type GzippedText []byte
// Value implements the driver.Valuer interface, gzipping the raw value of
// this GzippedText.
func (g GzippedText) Value() (driver.Value, error) {
	b := make([]byte, 0, len(g))
	buf := bytes.NewBuffer(b)
	w := gzip.NewWriter(buf)
	if _, err := w.Write(g); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// Scan implements the sql.Scanner interface, ungzipping the value coming off
// the wire and storing the raw result in the GzippedText.
func (g *GzippedText) Scan(src interface{}) error {
var source []byte
switch src.(type) {
case string:
source = []byte(src.(string))
case []byte:
source = src.([]byte)
default:
return errors.New("Incompatible type for GzippedText")
}
	reader, err := gzip.NewReader(bytes.NewReader(source))
	if err != nil {
		return err
	}
	defer reader.Close()
	b, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
*g = GzippedText(b)
return nil
}
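// exampleGzipRoundTrip is an illustrative sketch, not part of the original
// types package: it exercises the Valuer/Scanner pair directly, the same way
// database/sql does when writing and then reading a GzippedText column.
func exampleGzipRoundTrip(in []byte) ([]byte, error) {
	v, err := GzippedText(in).Value()
	if err != nil {
		return nil, err
	}
	var out GzippedText
	if err := out.Scan(v); err != nil {
		return nil, err
	}
	return []byte(out), nil
}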
// JSONText is a json.RawMessage, which is a []byte underneath.
// Value() validates the json format in the source, and returns an error if
// the json is not valid. Scan does no validation. JSONText additionally
// implements `Unmarshal`, which unmarshals the json within to an interface{}
type JSONText json.RawMessage
// MarshalJSON returns the *j as the JSON encoding of j.
func (j *JSONText) MarshalJSON() ([]byte, error) {
return *j, nil
}
// UnmarshalJSON sets *j to a copy of data
func (j *JSONText) UnmarshalJSON(data []byte) error {
if j == nil {
return errors.New("JSONText: UnmarshalJSON on nil pointer")
}
*j = append((*j)[0:0], data...)
return nil
}
// Value returns j as a value. This does a validating unmarshal into another
// RawMessage. If j is invalid json, it returns an error.
func (j JSONText) Value() (driver.Value, error) {
var m json.RawMessage
var err = j.Unmarshal(&m)
if err != nil {
return []byte{}, err
}
return []byte(j), nil
}
// Scan stores the src in *j. No validation is done.
func (j *JSONText) Scan(src interface{}) error {
var source []byte
switch src.(type) {
case string:
source = []byte(src.(string))
case []byte:
source = src.([]byte)
default:
return errors.New("Incompatible type for JSONText")
}
*j = JSONText(append((*j)[0:0], source...))
return nil
}
// Unmarshal unmarshals the JSON in j into v, as in json.Unmarshal.
func (j *JSONText) Unmarshal(v interface{}) error {
return json.Unmarshal([]byte(*j), v)
}
// Pretty printing for JSONText types
func (j JSONText) String() string {
return string(j)
}
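// exampleJSONRoundTrip is an illustrative sketch, not part of the original
// types package: Value validates the raw JSON and Unmarshal decodes it into a
// generic map without an intermediate struct.
func exampleJSONRoundTrip(raw string) (map[string]interface{}, error) {
	j := JSONText(raw)
	if _, err := j.Value(); err != nil {
		return nil, err
	}
	m := map[string]interface{}{}
	if err := j.Unmarshal(&m); err != nil {
		return nil, err
	}
	return m, nil
}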

View File

@@ -1,42 +0,0 @@
package types
import "testing"
func TestGzipText(t *testing.T) {
g := GzippedText("Hello, world")
v, err := g.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&g).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
if string(g) != "Hello, world" {
t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g))
}
}
func TestJSONText(t *testing.T) {
j := JSONText(`{"foo": 1, "bar": 2}`)
v, err := j.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&j).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
m := map[string]interface{}{}
j.Unmarshal(&m)
if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 {
t.Errorf("Expected valid json but got some garbage instead? %#v", m)
}
j = JSONText(`{"foo": 1, invalid, false}`)
v, err = j.Value()
if err == nil {
t.Errorf("Was expecting invalid json to fail!")
}
}

View File

@@ -1,3 +0,0 @@
*.db
*.exe
*.dll

View File

@@ -1,13 +0,0 @@
language: go
sudo: required
dist: trusty
go:
- 1.5
- 1.6
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
- go test -v . -tags "libsqlite3"

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,81 +0,0 @@
go-sqlite3
==========
[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
Description
-----------
sqlite3 driver conforming to the built-in database/sql interface
Installation
------------
This package can be installed with the go get command:
go get github.com/mattn/go-sqlite3
_go-sqlite3_ is a *cgo* package.
If you want to build your app using go-sqlite3, you need gcc.
However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore.
Documentation
-------------
API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
Examples can be found under the `./_example` directory
FAQ
---
* Want to build go-sqlite3 with libsqlite3 on Linux.
Use `go build --tags "libsqlite3 linux"`
* Want to build go-sqlite3 with libsqlite3 on OS X.
Install sqlite3 from homebrew: `brew install sqlite3`
Use `go build --tags "libsqlite3 darwin"`
* Want to build go-sqlite3 with icu extension.
Use `go build --tags "icu"`
* Can't build go-sqlite3 on windows 64bit.
> Probably you are using Go 1.0; Go 1.0 has a problem when it comes to compiling/linking on 64-bit Windows.
> See: https://github.com/mattn/go-sqlite3/issues/27
* Getting insert error while query is opened.
> You can pass some arguments into the connection string, for example, a URI.
> See: https://github.com/mattn/go-sqlite3/issues/39
* Do you want to cross compile? mingw on Linux or Mac?
> See: https://github.com/mattn/go-sqlite3/issues/106
> See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
* Want to get time.Time with current locale
Use `loc=auto` in the SQLite3 filename scheme, like `file:foo.db?loc=auto`, as in the sketch below.
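A minimal sketch (assuming `database/sql` and this driver are imported; `foo.db` is a placeholder):

    db, err := sql.Open("sqlite3", "file:foo.db?loc=auto")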
License
-------
MIT: http://mattn.mit-license.org/2012
sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
The -binding suffix was added to avoid build failures under gccgo.
In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
Author
------
Yasuhiro Matsumoto (a.k.a mattn)

View File

@@ -1,133 +0,0 @@
package main
import (
"database/sql"
"fmt"
"log"
"math"
"math/rand"
sqlite "github.com/mattn/go-sqlite3"
)
// Computes x^y
func pow(x, y int64) int64 {
return int64(math.Pow(float64(x), float64(y)))
}
// Computes the bitwise exclusive-or of all its arguments
func xor(xs ...int64) int64 {
var ret int64
for _, x := range xs {
ret ^= x
}
return ret
}
// Returns a random number. It's actually deterministic here because
// we don't seed the RNG, but it's an example of a non-pure function
// from SQLite's POV.
func getrand() int64 {
return rand.Int63()
}
// Computes the standard deviation of a GROUPed BY set of values
type stddev struct {
xs []int64
// Running average calculation
sum int64
n int64
}
func newStddev() *stddev { return &stddev{} }
func (s *stddev) Step(x int64) {
s.xs = append(s.xs, x)
s.sum += x
s.n++
}
func (s *stddev) Done() float64 {
mean := float64(s.sum) / float64(s.n)
var sqDiff []float64
for _, x := range s.xs {
sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2))
}
var dev float64
for _, x := range sqDiff {
dev += x
}
dev /= float64(len(sqDiff))
return math.Sqrt(dev)
}
func main() {
sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
ConnectHook: func(conn *sqlite.SQLiteConn) error {
if err := conn.RegisterFunc("pow", pow, true); err != nil {
return err
}
if err := conn.RegisterFunc("xor", xor, true); err != nil {
return err
}
if err := conn.RegisterFunc("rand", getrand, false); err != nil {
return err
}
if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
return err
}
return nil
},
})
db, err := sql.Open("sqlite3_custom", ":memory:")
if err != nil {
log.Fatal("Failed to open database:", err)
}
defer db.Close()
var i int64
err = db.QueryRow("SELECT pow(2,3)").Scan(&i)
if err != nil {
log.Fatal("POW query error:", err)
}
fmt.Println("pow(2,3) =", i) // 8
err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i)
if err != nil {
log.Fatal("XOR query error:", err)
}
fmt.Println("xor(1,2,3,4,5) =", i) // 7
err = db.QueryRow("SELECT rand()").Scan(&i)
if err != nil {
log.Fatal("RAND query error:", err)
}
fmt.Println("rand() =", i) // pseudorandom
_, err = db.Exec("create table foo (department integer, profits integer)")
if err != nil {
log.Fatal("Failed to create table:", err)
}
_, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)")
if err != nil {
log.Fatal("Failed to insert records:", err)
}
rows, err := db.Query("select department, stddev(profits) from foo group by department")
if err != nil {
log.Fatal("STDDEV query error:", err)
}
defer rows.Close()
for rows.Next() {
var dept int64
var dev float64
if err := rows.Scan(&dept, &dev); err != nil {
log.Fatal(err)
}
fmt.Printf("dept=%d stddev=%f\n", dept, dev)
}
if err := rows.Err(); err != nil {
log.Fatal(err)
}
}

View File

@@ -1,71 +0,0 @@
package main
import (
"database/sql"
"github.com/mattn/go-sqlite3"
"log"
"os"
)
func main() {
sqlite3conn := []*sqlite3.SQLiteConn{}
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
return nil
},
})
os.Remove("./foo.db")
os.Remove("./bar.db")
destDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db")
if err != nil {
log.Fatal(err)
}
defer destDb.Close()
destDb.Ping()
_, err = destDb.Exec("create table foo(id int, value text)")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(1, 'foo')")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(2, 'bar')")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Query("select * from foo")
if err != nil {
log.Fatal(err)
}
srcDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db")
if err != nil {
log.Fatal(err)
}
defer srcDb.Close()
srcDb.Ping()
bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main")
if err != nil {
log.Fatal(err)
}
_, err = bk.Step(-1)
if err != nil {
log.Fatal(err)
}
_, err = destDb.Query("select * from foo")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(3, 'bar')")
if err != nil {
log.Fatal(err)
}
bk.Finish()
}

View File

@@ -1,22 +0,0 @@
ifeq ($(OS),Windows_NT)
EXE=extension.exe
EXT=sqlite3_mod_regexp.dll
RM=cmd /c del
LDFLAG=
else
EXE=extension
EXT=sqlite3_mod_regexp.so
RM=rm
LDFLAG=-fPIC
endif
all : $(EXE) $(EXT)
$(EXE) : extension.go
go build $<
$(EXT) : sqlite3_mod_regexp.c
gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre
clean :
@-$(RM) $(EXE) $(EXT)

View File

@@ -1,43 +0,0 @@
package main
import (
"database/sql"
"fmt"
"github.com/mattn/go-sqlite3"
"log"
)
func main() {
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// Force db to make a new connection in pool
// by putting the original in a transaction
tx, err := db.Begin()
if err != nil {
log.Fatal(err)
}
defer tx.Commit()
// New connection works (hopefully!)
rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var helloworld string
rows.Scan(&helloworld)
fmt.Println(helloworld)
}
}

View File

@@ -1,31 +0,0 @@
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL);
}

View File

@@ -1,24 +0,0 @@
ifeq ($(OS),Windows_NT)
EXE=extension.exe
EXT=sqlite3_mod_vtable.dll
RM=cmd /c del
LIBCURL=-lcurldll
LDFLAG=
else
EXE=extension
EXT=sqlite3_mod_vtable.so
RM=rm
LDFLAG=-fPIC
LIBCURL=-lcurl
endif
all : $(EXE) $(EXT)
$(EXE) : extension.go
go build $<
$(EXT) : sqlite3_mod_vtable.cc
g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL)
clean :
@-$(RM) $(EXE) $(EXT)

View File

@@ -1,36 +0,0 @@
package main
import (
"database/sql"
"fmt"
"github.com/mattn/go-sqlite3"
"log"
)
func main() {
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_vtable",
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
rows, err := db.Query("select id, full_name, description, html_url from repo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id, full_name, description, html_url string
rows.Scan(&id, &full_name, &description, &html_url)
fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, full_name, description, html_url)
}
}

File diff suppressed because it is too large

View File

@@ -1,238 +0,0 @@
#include <string>
#include <sstream>
#include <sqlite3-binding.h>
#include <sqlite3ext.h>
#include <curl/curl.h>
#include "picojson.h"
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif
SQLITE_EXTENSION_INIT1;
typedef struct {
char* data; // response data from server
size_t size; // response size of data
} MEMFILE;
MEMFILE*
memfopen() {
MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE));
if (mf) {
mf->data = NULL;
mf->size = 0;
}
return mf;
}
void
memfclose(MEMFILE* mf) {
if (mf->data) free(mf->data);
free(mf);
}
size_t
memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) {
MEMFILE* mf = (MEMFILE*) stream;
int block = size * nmemb;
if (!mf) return block; // through
if (!mf->data)
mf->data = (char*) malloc(block);
else
mf->data = (char*) realloc(mf->data, mf->size + block);
if (mf->data) {
memcpy(mf->data + mf->size, ptr, block);
mf->size += block;
}
return block;
}
char*
memfstrdup(MEMFILE* mf) {
char* buf;
if (mf->size == 0) return NULL;
buf = (char*) malloc(mf->size + 1);
memcpy(buf, mf->data, mf->size);
buf[mf->size] = 0;
return buf;
}
static int
my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) {
std::stringstream ss;
ss << "CREATE TABLE " << argv[0]
<< "(id int, full_name text, description text, html_url text)";
int rc = sqlite3_declare_vtab(db, ss.str().c_str());
*ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab));
memset(*ppVTab, 0, sizeof(sqlite3_vtab));
return rc;
}
static int
my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) {
return my_connect(db, pAux, argc, argv, ppVTab, c);
}
static int my_disconnect(sqlite3_vtab *pVTab) {
sqlite3_free(pVTab);
return SQLITE_OK;
}
static int
my_destroy(sqlite3_vtab *pVTab) {
sqlite3_free(pVTab);
return SQLITE_OK;
}
typedef struct {
sqlite3_vtab_cursor base;
int index;
picojson::value* rows;
} cursor;
static int
my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) {
MEMFILE* mf;
CURL* curl;
char* json;
CURLcode res = CURLE_OK;
char error[CURL_ERROR_SIZE] = {0};
char* cert_file = getenv("SSL_CERT_FILE");
mf = memfopen();
curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1);
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2);
curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0");
curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories");
if (cert_file)
curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
if (res != CURLE_OK) {
std::cerr << error << std::endl;
return SQLITE_FAIL;
}
picojson::value* v = new picojson::value;
std::string err;
picojson::parse(*v, mf->data, mf->data + mf->size, &err);
memfclose(mf);
if (!err.empty()) {
delete v;
std::cerr << err << std::endl;
return SQLITE_FAIL;
}
cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor));
c->rows = v;
c->index = 0;
*ppCursor = &c->base;
return SQLITE_OK;
}
static int
my_close(cursor *c) {
delete c->rows;
sqlite3_free(c);
return SQLITE_OK;
}
static int
my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) {
c->index = 0;
return SQLITE_OK;
}
static int
my_next(cursor *c) {
c->index++;
return SQLITE_OK;
}
static int
my_eof(cursor *c) {
return c->index >= c->rows->get<picojson::array>().size() ? 1 : 0;
}
static int
my_column(cursor *c, sqlite3_context *ctxt, int i) {
picojson::value v = c->rows->get<picojson::array>()[c->index];
picojson::object row = v.get<picojson::object>();
const char* p = NULL;
switch (i) {
case 0:
p = row["id"].to_str().c_str();
break;
case 1:
p = row["full_name"].to_str().c_str();
break;
case 2:
p = row["description"].to_str().c_str();
break;
case 3:
p = row["html_url"].to_str().c_str();
break;
}
sqlite3_result_text(ctxt, strdup(p), strlen(p), free);
return SQLITE_OK;
}
static int
my_rowid(cursor *c, sqlite3_int64 *pRowid) {
*pRowid = c->index;
return SQLITE_OK;
}
static int
my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) {
return SQLITE_OK;
}
static const sqlite3_module module = {
0,
my_create,
my_connect,
my_bestindex,
my_disconnect,
my_destroy,
my_open,
(int (*)(sqlite3_vtab_cursor *)) my_close,
(int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter,
(int (*)(sqlite3_vtab_cursor *)) my_next,
(int (*)(sqlite3_vtab_cursor *)) my_eof,
(int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column,
(int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid,
NULL, // my_update
NULL, // my_begin
NULL, // my_sync
NULL, // my_commit
NULL, // my_rollback
NULL, // my_findfunction
NULL, // my_rename
};
static void
destructor(void *arg) {
return;
}
extern "C" {
EXPORT int
sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
sqlite3_create_module_v2(db, "github", &module, NULL, destructor);
return 0;
}
}

View File

@@ -1,106 +0,0 @@
package main
import (
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3"
"log"
"os"
)
func main() {
os.Remove("./foo.db")
db, err := sql.Open("sqlite3", "./foo.db")
if err != nil {
log.Fatal(err)
}
defer db.Close()
sqlStmt := `
create table foo (id integer not null primary key, name text);
delete from foo;
`
_, err = db.Exec(sqlStmt)
if err != nil {
log.Printf("%q: %s\n", err, sqlStmt)
return
}
tx, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
for i := 0; i < 100; i++ {
_, err = stmt.Exec(i, fmt.Sprintf("こんにちわ世界%03d", i))
if err != nil {
log.Fatal(err)
}
}
tx.Commit()
rows, err := db.Query("select id, name from foo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id int
var name string
err = rows.Scan(&id, &name)
if err != nil {
log.Fatal(err)
}
fmt.Println(id, name)
}
err = rows.Err()
if err != nil {
log.Fatal(err)
}
stmt, err = db.Prepare("select name from foo where id = ?")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
var name string
err = stmt.QueryRow("3").Scan(&name)
if err != nil {
log.Fatal(err)
}
fmt.Println(name)
_, err = db.Exec("delete from foo")
if err != nil {
log.Fatal(err)
}
_, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')")
if err != nil {
log.Fatal(err)
}
rows, err = db.Query("select id, name from foo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id int
var name string
err = rows.Scan(&id, &name)
if err != nil {
log.Fatal(err)
}
fmt.Println(id, name)
}
err = rows.Err()
if err != nil {
log.Fatal(err)
}
}

View File

@@ -1,74 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
*/
import "C"
import (
"runtime"
"unsafe"
)
type SQLiteBackup struct {
b *C.sqlite3_backup
}
func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
destptr := C.CString(dest)
defer C.free(unsafe.Pointer(destptr))
srcptr := C.CString(src)
defer C.free(unsafe.Pointer(srcptr))
if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil {
bb := &SQLiteBackup{b: b}
runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
return bb, nil
}
return nil, c.lastError()
}
// Step copies up to p pages from the source database to the destination by
// calling the underlying `sqlite3_backup_step` function. It returns a boolean
// indicating whether the backup is done, and an error reporting any failure.
// Done is true when the underlying C function returns SQLITE_DONE (code 101).
func (b *SQLiteBackup) Step(p int) (bool, error) {
ret := C.sqlite3_backup_step(b.b, C.int(p))
if ret == C.SQLITE_DONE {
return true, nil
} else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY {
return false, Error{Code: ErrNo(ret)}
}
return false, nil
}
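// exampleIncrementalBackup is an illustrative sketch, not part of the original
// go-sqlite3 source: it copies pagesPerStep pages at a time until Step reports
// the backup is done, then releases resources via the deferred Finish. Note
// that Step returns (false, nil) on SQLITE_BUSY/SQLITE_LOCKED, so a real
// caller may want to sleep between retries.
func exampleIncrementalBackup(dest, src *SQLiteConn, pagesPerStep int) error {
	bk, err := dest.Backup("main", src, "main")
	if err != nil {
		return err
	}
	defer bk.Finish()
	for {
		done, err := bk.Step(pagesPerStep)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}
}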
func (b *SQLiteBackup) Remaining() int {
return int(C.sqlite3_backup_remaining(b.b))
}
func (b *SQLiteBackup) PageCount() int {
return int(C.sqlite3_backup_pagecount(b.b))
}
func (b *SQLiteBackup) Finish() error {
return b.Close()
}
func (b *SQLiteBackup) Close() error {
ret := C.sqlite3_backup_finish(b.b)
if ret != 0 {
return Error{Code: ErrNo(ret)}
}
b.b = nil
runtime.SetFinalizer(b, nil)
return nil
}

View File

@@ -1,336 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
// You can't export a Go function to C and have definitions in the C
// preamble in the same file, so we have to have callbackTrampoline in
// its own file. Because we need a separate file anyway, the support
// code for SQLite custom functions is in here.
/*
#include <sqlite3-binding.h>
#include <stdlib.h>
void _sqlite3_result_text(sqlite3_context* ctx, const char* s);
void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l);
*/
import "C"
import (
"errors"
"fmt"
"math"
"reflect"
"sync"
"unsafe"
)
//export callbackTrampoline
func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo)
fi.Call(ctx, args)
}
//export stepTrampoline
func stepTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo)
ai.Step(ctx, args)
}
//export doneTrampoline
func doneTrampoline(ctx *C.sqlite3_context) {
handle := uintptr(C.sqlite3_user_data(ctx))
ai := lookupHandle(handle).(*aggInfo)
ai.Done(ctx)
}
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
db *SQLiteConn
val interface{}
}
var handleLock sync.Mutex
var handleVals = make(map[uintptr]handleVal)
var handleIndex uintptr = 100
func newHandle(db *SQLiteConn, v interface{}) uintptr {
handleLock.Lock()
defer handleLock.Unlock()
i := handleIndex
handleIndex++
handleVals[i] = handleVal{db, v}
return i
}
func lookupHandle(handle uintptr) interface{} {
handleLock.Lock()
defer handleLock.Unlock()
r, ok := handleVals[handle]
if !ok {
if handle >= 100 && handle < handleIndex {
panic("deleted handle")
} else {
panic("invalid handle")
}
}
return r.val
}
func deleteHandles(db *SQLiteConn) {
handleLock.Lock()
defer handleLock.Unlock()
for handle, val := range handleVals {
if val.db == db {
delete(handleVals, handle)
}
}
}
// This is only here so that tests can refer to it.
type callbackArgRaw C.sqlite3_value
type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error)
type callbackArgCast struct {
f callbackArgConverter
typ reflect.Type
}
func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) {
val, err := c.f(v)
if err != nil {
return reflect.Value{}, err
}
if !val.Type().ConvertibleTo(c.typ) {
return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ)
}
return val.Convert(c.typ), nil
}
func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil
}
func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
i := int64(C.sqlite3_value_int64(v))
val := false
if i != 0 {
val = true
}
return reflect.ValueOf(val), nil
}
func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_FLOAT {
return reflect.Value{}, fmt.Errorf("argument must be a FLOAT")
}
return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil
}
func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := C.sqlite3_value_blob(v)
return reflect.ValueOf(C.GoBytes(p, l)), nil
case C.SQLITE_TEXT:
l := C.sqlite3_value_bytes(v)
c := unsafe.Pointer(C.sqlite3_value_text(v))
return reflect.ValueOf(C.GoBytes(c, l)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := (*C.char)(C.sqlite3_value_blob(v))
return reflect.ValueOf(C.GoStringN(p, l)), nil
case C.SQLITE_TEXT:
c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v)))
return reflect.ValueOf(C.GoString(c)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_INTEGER:
return callbackArgInt64(v)
case C.SQLITE_FLOAT:
return callbackArgFloat64(v)
case C.SQLITE_TEXT:
return callbackArgString(v)
case C.SQLITE_BLOB:
return callbackArgBytes(v)
case C.SQLITE_NULL:
// Interpret NULL as a nil byte slice.
var ret []byte
return reflect.ValueOf(ret), nil
default:
panic("unreachable")
}
}
func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
switch typ.Kind() {
case reflect.Interface:
if typ.NumMethod() != 0 {
return nil, errors.New("the only supported interface type is interface{}")
}
return callbackArgGeneric, nil
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackArgBytes, nil
case reflect.String:
return callbackArgString, nil
case reflect.Bool:
return callbackArgBool, nil
case reflect.Int64:
return callbackArgInt64, nil
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
c := callbackArgCast{callbackArgInt64, typ}
return c.Run, nil
case reflect.Float64:
return callbackArgFloat64, nil
case reflect.Float32:
c := callbackArgCast{callbackArgFloat64, typ}
return c.Run, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) {
var args []reflect.Value
if len(argv) < len(converters) {
return nil, fmt.Errorf("function requires at least %d arguments", len(converters))
}
for i, arg := range argv[:len(converters)] {
v, err := converters[i](arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
if variadic != nil {
for _, arg := range argv[len(converters):] {
v, err := variadic(arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
}
return args, nil
}
type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error
func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Int64:
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
v = v.Convert(reflect.TypeOf(int64(0)))
case reflect.Bool:
b := v.Interface().(bool)
if b {
v = reflect.ValueOf(int64(1))
} else {
v = reflect.ValueOf(int64(0))
}
default:
return fmt.Errorf("cannot convert %s to INTEGER", v.Type())
}
C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64)))
return nil
}
func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Float64:
case reflect.Float32:
v = v.Convert(reflect.TypeOf(float64(0)))
default:
return fmt.Errorf("cannot convert %s to FLOAT", v.Type())
}
C.sqlite3_result_double(ctx, C.double(v.Interface().(float64)))
return nil
}
func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
return fmt.Errorf("cannot convert %s to BLOB", v.Type())
}
i := v.Interface()
if i == nil || len(i.([]byte)) == 0 {
C.sqlite3_result_null(ctx)
} else {
bs := i.([]byte)
C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs)))
}
return nil
}
func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.String {
return fmt.Errorf("cannot convert %s to TEXT", v.Type())
}
C._sqlite3_result_text(ctx, C.CString(v.Interface().(string)))
return nil
}
func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
switch typ.Kind() {
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackRetBlob, nil
case reflect.String:
return callbackRetText, nil
case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
return callbackRetInteger, nil
case reflect.Float32, reflect.Float64:
return callbackRetFloat, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackError(ctx *C.sqlite3_context, err error) {
cstr := C.CString(err.Error())
defer C.free(unsafe.Pointer(cstr))
C.sqlite3_result_error(ctx, cstr, -1)
}
// Test support code. Tests are not allowed to import "C", so we can't
// declare any functions that use C.sqlite3_value.
func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter {
return func(*C.sqlite3_value) (reflect.Value, error) {
return v, err
}
}

View File

@@ -1,97 +0,0 @@
package sqlite3
import (
"errors"
"math"
"reflect"
"testing"
)
func TestCallbackArgCast(t *testing.T) {
intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil)
floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil)
errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test"))
tests := []struct {
f callbackArgConverter
o reflect.Value
}{
{intConv, reflect.ValueOf(int8(-1))},
{intConv, reflect.ValueOf(int16(-1))},
{intConv, reflect.ValueOf(int32(-1))},
{intConv, reflect.ValueOf(uint8(math.MaxUint8))},
{intConv, reflect.ValueOf(uint16(math.MaxUint16))},
{intConv, reflect.ValueOf(uint32(math.MaxUint32))},
// Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1
{intConv, reflect.ValueOf(uint64(math.MaxInt64))},
{floatConv, reflect.ValueOf(float32(math.Inf(1)))},
}
for _, test := range tests {
conv := callbackArgCast{test.f, test.o.Type()}
val, err := conv.Run(nil)
if err != nil {
t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err)
} else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) {
t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface())
}
}
conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))}
_, err := conv.Run(nil)
if err == nil {
t.Errorf("Expected error during callbackArgCast, but got none")
}
}
func TestCallbackConverters(t *testing.T) {
tests := []struct {
v interface{}
err bool
}{
// Unfortunately, we can't tell which converter was returned,
// but we can at least check which types can be converted.
{[]byte{0}, false},
{"text", false},
{true, false},
{int8(0), false},
{int16(0), false},
{int32(0), false},
{int64(0), false},
{uint8(0), false},
{uint16(0), false},
{uint32(0), false},
{uint64(0), false},
{int(0), false},
{uint(0), false},
{float64(0), false},
{float32(0), false},
{func() {}, true},
{complex64(complex(0, 0)), true},
{complex128(complex(0, 0)), true},
{struct{}{}, true},
{map[string]string{}, true},
{[]string{}, true},
{(*int8)(nil), true},
{make(chan int), true},
}
for _, test := range tests {
_, err := callbackArg(reflect.TypeOf(test.v))
if test.err && err == nil {
t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
} else if !test.err && err != nil {
t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
}
}
for _, test := range tests {
_, err := callbackRet(reflect.TypeOf(test.v))
if test.err && err == nil {
t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
} else if !test.err && err != nil {
t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
}
}
}

View File

@@ -1,112 +0,0 @@
/*
Package sqlite3 provides interface to SQLite3 databases.
This works as a driver for database/sql.
Installation
go get github.com/mattn/go-sqlite3
Supported Types
Currently, go-sqlite3 supports the following data types.
+------------------------------+
|go | sqlite3 |
|----------|-------------------|
|nil | null |
|int | integer |
|int64 | integer |
|float64 | float |
|bool | integer |
|[]byte | blob |
|string | text |
|time.Time | timestamp/datetime|
+------------------------------+
SQLite3 Extension
You can write your own extension module for sqlite3. For example, below is an
extension for a Regexp matcher operation.
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg,
const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
(void*)db, regexp_func, NULL, NULL);
}
It needs to be built as a so/dll shared library. And you need to register
the extension module like below.
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
Then, you can use this extension.
rows, err := db.Query("select text from mytable where name regexp '^golang'")
Connection Hook
You can hook and inject your code when the connection is established. database/sql
doesn't provide a way to get native go-sqlite3 interfaces. So if you want,
you need to set ConnectHook and get the SQLiteConn.
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
return nil
},
})
Go SQlite3 Extensions
If you want to register Go functions as SQLite extension functions,
call RegisterFunction from ConnectHook.
regex = func(re, s string) (bool, error) {
return regexp.MatchString(re, s)
}
sql.Register("sqlite3_with_go_func",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
return conn.RegisterFunc("regexp", regex, true)
},
})
See the documentation of RegisterFunc for more details.
*/
package sqlite3

View File

@@ -1,128 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import "C"
type ErrNo int
const ErrNoMask C.int = 0xff
type ErrNoExtended int
type Error struct {
Code ErrNo /* The error code returned by SQLite */
ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
err string /* The error string returned by sqlite3_errmsg(),
this usually contains more specific details. */
}
// result codes from http://www.sqlite.org/c3ref/c_abort.html
var (
ErrError = ErrNo(1) /* SQL error or missing database */
ErrInternal = ErrNo(2) /* Internal logic error in SQLite */
ErrPerm = ErrNo(3) /* Access permission denied */
ErrAbort = ErrNo(4) /* Callback routine requested an abort */
ErrBusy = ErrNo(5) /* The database file is locked */
ErrLocked = ErrNo(6) /* A table in the database is locked */
ErrNomem = ErrNo(7) /* A malloc() failed */
ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */
ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */
ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */
ErrCorrupt = ErrNo(11) /* The database disk image is malformed */
ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */
ErrFull = ErrNo(13) /* Insertion failed because database is full */
ErrCantOpen = ErrNo(14) /* Unable to open the database file */
ErrProtocol = ErrNo(15) /* Database lock protocol error */
ErrEmpty = ErrNo(16) /* Database is empty */
ErrSchema = ErrNo(17) /* The database schema changed */
ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */
ErrConstraint = ErrNo(19) /* Abort due to constraint violation */
ErrMismatch = ErrNo(20) /* Data type mismatch */
ErrMisuse = ErrNo(21) /* Library used incorrectly */
ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */
ErrAuth = ErrNo(23) /* Authorization denied */
ErrFormat = ErrNo(24) /* Auxiliary database format error */
ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */
ErrNotADB = ErrNo(26) /* File opened that is not a database file */
ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */
ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */
)
func (err ErrNo) Error() string {
return Error{Code: err}.Error()
}
func (err ErrNo) Extend(by int) ErrNoExtended {
return ErrNoExtended(int(err) | (by << 8))
}
func (err ErrNoExtended) Error() string {
return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}
func (err Error) Error() string {
if err.err != "" {
return err.err
}
return errorString(err)
}
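// exampleIsConstraintErr is an illustrative sketch, not part of the original
// go-sqlite3 source: callers typically type-assert the error returned by
// database/sql and compare its Code (or ExtendedCode) against these values.
func exampleIsConstraintErr(err error) bool {
	sqliteErr, ok := err.(Error)
	return ok && sqliteErr.Code == ErrConstraint
}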
// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
var (
ErrIoErrRead = ErrIoErr.Extend(1)
ErrIoErrShortRead = ErrIoErr.Extend(2)
ErrIoErrWrite = ErrIoErr.Extend(3)
ErrIoErrFsync = ErrIoErr.Extend(4)
ErrIoErrDirFsync = ErrIoErr.Extend(5)
ErrIoErrTruncate = ErrIoErr.Extend(6)
ErrIoErrFstat = ErrIoErr.Extend(7)
ErrIoErrUnlock = ErrIoErr.Extend(8)
ErrIoErrRDlock = ErrIoErr.Extend(9)
ErrIoErrDelete = ErrIoErr.Extend(10)
ErrIoErrBlocked = ErrIoErr.Extend(11)
ErrIoErrNoMem = ErrIoErr.Extend(12)
ErrIoErrAccess = ErrIoErr.Extend(13)
ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
ErrIoErrLock = ErrIoErr.Extend(15)
ErrIoErrClose = ErrIoErr.Extend(16)
ErrIoErrDirClose = ErrIoErr.Extend(17)
ErrIoErrSHMOpen = ErrIoErr.Extend(18)
ErrIoErrSHMSize = ErrIoErr.Extend(19)
ErrIoErrSHMLock = ErrIoErr.Extend(20)
ErrIoErrSHMMap = ErrIoErr.Extend(21)
ErrIoErrSeek = ErrIoErr.Extend(22)
ErrIoErrDeleteNoent = ErrIoErr.Extend(23)
ErrIoErrMMap = ErrIoErr.Extend(24)
ErrIoErrGetTempPath = ErrIoErr.Extend(25)
ErrIoErrConvPath = ErrIoErr.Extend(26)
ErrLockedSharedCache = ErrLocked.Extend(1)
ErrBusyRecovery = ErrBusy.Extend(1)
ErrBusySnapshot = ErrBusy.Extend(2)
ErrCantOpenNoTempDir = ErrCantOpen.Extend(1)
ErrCantOpenIsDir = ErrCantOpen.Extend(2)
ErrCantOpenFullPath = ErrCantOpen.Extend(3)
ErrCantOpenConvPath = ErrCantOpen.Extend(4)
ErrCorruptVTab = ErrCorrupt.Extend(1)
ErrReadonlyRecovery = ErrReadonly.Extend(1)
ErrReadonlyCantLock = ErrReadonly.Extend(2)
ErrReadonlyRollback = ErrReadonly.Extend(3)
ErrReadonlyDbMoved = ErrReadonly.Extend(4)
ErrAbortRollback = ErrAbort.Extend(2)
ErrConstraintCheck = ErrConstraint.Extend(1)
ErrConstraintCommitHook = ErrConstraint.Extend(2)
ErrConstraintForeignKey = ErrConstraint.Extend(3)
ErrConstraintFunction = ErrConstraint.Extend(4)
ErrConstraintNotNull = ErrConstraint.Extend(5)
ErrConstraintPrimaryKey = ErrConstraint.Extend(6)
ErrConstraintTrigger = ErrConstraint.Extend(7)
ErrConstraintUnique = ErrConstraint.Extend(8)
ErrConstraintVTab = ErrConstraint.Extend(9)
ErrConstraintRowId = ErrConstraint.Extend(10)
ErrNoticeRecoverWAL = ErrNotice.Extend(1)
ErrNoticeRecoverRollback = ErrNotice.Extend(2)
ErrWarningAutoIndex = ErrWarning.Extend(1)
)

View File

@@ -1,242 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"io/ioutil"
"os"
"path"
"testing"
)
func TestSimpleError(t *testing.T) {
e := ErrError.Error()
if e != "SQL logic error or missing database" {
t.Error("wrong error code:" + e)
}
}
func TestCorruptDbErrors(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
f, err := os.Create(dbFileName)
if err != nil {
t.Error(err)
}
f.Write([]byte{1, 2, 3, 4, 5})
f.Close()
db, err := sql.Open("sqlite3", dbFileName)
if err == nil {
_, err = db.Exec("drop table foo")
}
sqliteErr := err.(Error)
if sqliteErr.Code != ErrNotADB {
t.Error("wrong error code for corrupted DB")
}
if err.Error() == "" {
t.Error("wrong error string for corrupted DB")
}
db.Close()
}
func TestSqlLogicErrors(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
if err != nil {
t.Error(err)
}
const expectedErr = "table Foo already exists"
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
if err.Error() != expectedErr {
t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr)
}
}
func TestExtendedErrorCodes_ForeignKey(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);")
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintForeignKey {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintForeignKey)
}
}
}
func TestExtendedErrorCodes_NotNull(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
if err != nil {
t.Fatalf("Creating first row: %v", err)
}
id, err := res.LastInsertId()
if err != nil {
t.Fatalf("Retrieving last insert id: %v", err)
}
_, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id)
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintNotNull {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintNotNull)
}
}
}
func TestExtendedErrorCodes_Unique(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
if err != nil {
t.Fatalf("Creating first row: %v", err)
}
id, err := res.LastInsertId()
if err != nil {
t.Fatalf("Retrieving last insert id: %v", err)
}
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id)
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintUnique {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintUnique)
}
extended := sqliteErr.Code.Extend(3).Error()
expected := "constraint failed"
if extended != expected {
t.Errorf("Wrong basic error code: %q != %q",
extended, expected)
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,130 +0,0 @@
// Copyright (C) 2015 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"os"
"testing"
)
func TestFTS3(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("DROP TABLE foo")
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts3(id INTEGER PRIMARY KEY, value TEXT)")
if err != nil {
t.Fatal("Failed to create table:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `今日の 晩御飯は 天麩羅よ`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 2, `今日は いい 天気だ`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
rows, err := db.Query("SELECT id, value FROM foo WHERE value MATCH '今日* 天*'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
for rows.Next() {
var id int
var value string
if err := rows.Scan(&id, &value); err != nil {
t.Error("Unable to scan results:", err)
continue
}
if id == 1 && value != `今日の 晩御飯は 天麩羅よ` {
t.Error("Value for id 1 should be `今日の 晩御飯は 天麩羅よ`, but:", value)
} else if id == 2 && value != `今日は いい 天気だ` {
t.Error("Value for id 2 should be `今日は いい 天気だ`, but:", value)
}
}
rows, err = db.Query("SELECT value FROM foo WHERE value MATCH '今日* 天麩羅*'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
var value string
if !rows.Next() {
t.Fatal("Result should be only one")
}
if err := rows.Scan(&value); err != nil {
t.Fatal("Unable to scan results:", err)
}
if value != `今日の 晩御飯は 天麩羅よ` {
t.Fatal("Value should be `今日の 晩御飯は 天麩羅よ`, but:", value)
}
if rows.Next() {
t.Fatal("Result should be only one")
}
}
func TestFTS4(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("DROP TABLE foo")
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts4(tokenize=unicode61, id INTEGER PRIMARY KEY, value TEXT)")
switch {
case err != nil && err.Error() == "unknown tokenizer: unicode61":
t.Skip("FTS4 not supported")
case err != nil:
t.Fatal("Failed to create table:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `février`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
rows, err := db.Query("SELECT value FROM foo WHERE value MATCH 'fevrier'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
var value string
if !rows.Next() {
t.Fatal("Result should be only one")
}
if err := rows.Scan(&value); err != nil {
t.Fatal("Unable to scan results:", err)
}
if value != `février` {
t.Fatal("Value should be `février`, but:", value)
}
if rows.Next() {
t.Fatal("Result should be only one")
}
}

View File

@@ -1,13 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build fts5
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_FTS5
#cgo LDFLAGS: -lm
*/
import "C"

View File

@@ -1,13 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build icu
package sqlite3
/*
#cgo LDFLAGS: -licuuc -licui18n
#cgo CFLAGS: -DSQLITE_ENABLE_ICU
*/
import "C"

View File

@@ -1,12 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build json1
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_JSON1
*/
import "C"

View File

@@ -1,14 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build libsqlite3
package sqlite3
/*
#cgo CFLAGS: -DUSE_LIBSQLITE3
#cgo linux LDFLAGS: -lsqlite3
#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
*/
import "C"

View File

@@ -1,63 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !sqlite_omit_load_extension
package sqlite3
/*
#include <sqlite3-binding.h>
#include <stdlib.h>
*/
import "C"
import (
"errors"
"unsafe"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
for _, extension := range extensions {
cext := C.CString(extension)
defer C.free(unsafe.Pointer(cext))
rv = C.sqlite3_load_extension(c.db, cext, nil, nil)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
clib := C.CString(lib)
defer C.free(unsafe.Pointer(clib))
centry := C.CString(entry)
defer C.free(unsafe.Pointer(centry))
rv = C.sqlite3_load_extension(c.db, clib, centry, nil)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
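
The exported LoadExtension method and the loadExtensions helper removed above are normally reached through driver registration rather than called directly on a connection. A minimal sketch, assuming the upstream mattn/go-sqlite3 import path and its ConnectHook field; the driver name, extension path, and entry point below are placeholders:

package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	// Register a driver variant whose ConnectHook loads an extension on
	// every new connection. Path and entry point are placeholders.
	sql.Register("sqlite3_with_ext", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.LoadExtension("./my_extension.so", "sqlite3_extension_init")
		},
	})

	db, err := sql.Open("sqlite3_with_ext", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}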


@ -1,23 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build sqlite_omit_load_extension
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_OMIT_LOAD_EXTENSION
*/
import "C"
import (
"errors"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
return errors.New("Extensions have been disabled for static builds")
}
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
return errors.New("Extensions have been disabled for static builds")
}


@ -1,13 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows
package sqlite3
/*
#cgo CFLAGS: -I.
#cgo linux LDFLAGS: -ldl
*/
import "C"

File diff suppressed because it is too large


@ -1,409 +0,0 @@
package sqlite3_test
import (
"database/sql"
"fmt"
"math/rand"
"regexp"
"strconv"
"sync"
"testing"
"time"
)
type Dialect int
const (
SQLITE Dialect = iota
POSTGRESQL
MYSQL
)
type DB struct {
*testing.T
*sql.DB
dialect Dialect
once sync.Once
}
var db *DB
// the following tables will be created and dropped during the test
var testTables = []string{"foo", "bar", "t", "bench"}
var tests = []testing.InternalTest{
{"TestBlobs", TestBlobs},
{"TestManyQueryRow", TestManyQueryRow},
{"TestTxQuery", TestTxQuery},
{"TestPreparedStmt", TestPreparedStmt},
}
var benchmarks = []testing.InternalBenchmark{
{"BenchmarkExec", BenchmarkExec},
{"BenchmarkQuery", BenchmarkQuery},
{"BenchmarkParams", BenchmarkParams},
{"BenchmarkStmt", BenchmarkStmt},
{"BenchmarkRows", BenchmarkRows},
{"BenchmarkStmtRows", BenchmarkStmtRows},
}
// RunTests runs the SQL test suite
func RunTests(t *testing.T, d *sql.DB, dialect Dialect) {
db = &DB{t, d, dialect, sync.Once{}}
testing.RunTests(func(string, string) (bool, error) { return true, nil }, tests)
if !testing.Short() {
for _, b := range benchmarks {
fmt.Printf("%-20s", b.Name)
r := testing.Benchmark(b.F)
fmt.Printf("%10d %10.0f req/s\n", r.N, float64(r.N)/r.T.Seconds())
}
}
db.tearDown()
}
func (db *DB) mustExec(sql string, args ...interface{}) sql.Result {
res, err := db.Exec(sql, args...)
if err != nil {
db.Fatalf("Error running %q: %v", sql, err)
}
return res
}
func (db *DB) tearDown() {
for _, tbl := range testTables {
switch db.dialect {
case SQLITE:
db.mustExec("drop table if exists " + tbl)
case MYSQL, POSTGRESQL:
db.mustExec("drop table if exists " + tbl)
default:
db.Fatal("unkown dialect")
}
}
}
// q replaces ? parameters if needed
func (db *DB) q(sql string) string {
switch db.dialect {
case POSTGRESQL: // replace with $1, $2, ...
qrx := regexp.MustCompile(`\?`)
n := 0
return qrx.ReplaceAllStringFunc(sql, func(string) string {
n++
return "$" + strconv.Itoa(n)
})
}
return sql
}
func (db *DB) blobType(size int) string {
switch db.dialect {
case SQLITE:
return fmt.Sprintf("blob[%d]", size)
case POSTGRESQL:
return "bytea"
case MYSQL:
return fmt.Sprintf("VARBINARY(%d)", size)
}
panic("unkown dialect")
}
func (db *DB) serialPK() string {
switch db.dialect {
case SQLITE:
return "integer primary key autoincrement"
case POSTGRESQL:
return "serial primary key"
case MYSQL:
return "integer primary key auto_increment"
}
panic("unkown dialect")
}
func (db *DB) now() string {
switch db.dialect {
case SQLITE:
return "datetime('now')"
case POSTGRESQL:
return "now()"
case MYSQL:
return "now()"
}
panic("unkown dialect")
}
func makeBench() {
if _, err := db.Exec("create table bench (n varchar(32), i integer, d double, s varchar(32), t datetime)"); err != nil {
panic(err)
}
st, err := db.Prepare("insert into bench values (?, ?, ?, ?, ?)")
if err != nil {
panic(err)
}
defer st.Close()
for i := 0; i < 100; i++ {
if _, err = st.Exec(nil, i, float64(i), fmt.Sprintf("%d", i), time.Now()); err != nil {
panic(err)
}
}
}
func TestResult(t *testing.T) {
db.tearDown()
db.mustExec("create temporary table test (id " + db.serialPK() + ", name varchar(10))")
for i := 1; i < 3; i++ {
r := db.mustExec(db.q("insert into test (name) values (?)"), fmt.Sprintf("row %d", i))
n, err := r.RowsAffected()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("got %v, want %v", n, 1)
}
n, err = r.LastInsertId()
if err != nil {
t.Fatal(err)
}
if n != int64(i) {
t.Errorf("got %v, want %v", n, i)
}
}
if _, err := db.Exec("error!"); err == nil {
t.Fatalf("expected error")
}
}
func TestBlobs(t *testing.T) {
db.tearDown()
var blob = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
db.mustExec("create table foo (id integer primary key, bar " + db.blobType(16) + ")")
db.mustExec(db.q("insert into foo (id, bar) values(?,?)"), 0, blob)
want := fmt.Sprintf("%x", blob)
b := make([]byte, 16)
err := db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&b)
got := fmt.Sprintf("%x", b)
if err != nil {
t.Errorf("[]byte scan: %v", err)
} else if got != want {
t.Errorf("for []byte, got %q; want %q", got, want)
}
err = db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&got)
want = string(blob)
if err != nil {
t.Errorf("string scan: %v", err)
} else if got != want {
t.Errorf("for string, got %q; want %q", got, want)
}
}
func TestManyQueryRow(t *testing.T) {
if testing.Short() {
t.Log("skipping in short mode")
return
}
db.tearDown()
db.mustExec("create table foo (id integer primary key, name varchar(50))")
db.mustExec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob")
var name string
for i := 0; i < 10000; i++ {
err := db.QueryRow(db.q("select name from foo where id = ?"), 1).Scan(&name)
if err != nil || name != "bob" {
t.Fatalf("on query %d: err=%v, name=%q", i, err, name)
}
}
}
func TestTxQuery(t *testing.T) {
db.tearDown()
tx, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer tx.Rollback()
_, err = tx.Exec("create table foo (id integer primary key, name varchar(50))")
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob")
if err != nil {
t.Fatal(err)
}
r, err := tx.Query(db.q("select name from foo where id = ?"), 1)
if err != nil {
t.Fatal(err)
}
defer r.Close()
if !r.Next() {
if r.Err() != nil {
t.Fatal(r.Err())
}
t.Fatal("expected one row")
}
var name string
err = r.Scan(&name)
if err != nil {
t.Fatal(err)
}
}
func TestPreparedStmt(t *testing.T) {
db.tearDown()
db.mustExec("CREATE TABLE t (count INT)")
sel, err := db.Prepare("SELECT count FROM t ORDER BY count DESC")
if err != nil {
t.Fatalf("prepare 1: %v", err)
}
ins, err := db.Prepare(db.q("INSERT INTO t (count) VALUES (?)"))
if err != nil {
t.Fatalf("prepare 2: %v", err)
}
for n := 1; n <= 3; n++ {
if _, err := ins.Exec(n); err != nil {
t.Fatalf("insert(%d) = %v", n, err)
}
}
const nRuns = 10
var wg sync.WaitGroup
for i := 0; i < nRuns; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 10; j++ {
count := 0
if err := sel.QueryRow().Scan(&count); err != nil && err != sql.ErrNoRows {
t.Errorf("Query: %v", err)
return
}
if _, err := ins.Exec(rand.Intn(100)); err != nil {
t.Errorf("Insert: %v", err)
return
}
}
}()
}
wg.Wait()
}
// Benchmarks need to use panic() since b.Error errors are lost when
// running via testing.Benchmark(). I would like to run these via go
// test -bench but calling Benchmark() from a benchmark test
// currently hangs go.
func BenchmarkExec(b *testing.B) {
for i := 0; i < b.N; i++ {
if _, err := db.Exec("select 1"); err != nil {
panic(err)
}
}
}
func BenchmarkQuery(b *testing.B) {
for i := 0; i < b.N; i++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := db.QueryRow("select null, 1, 1.1, 'foo'").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
func BenchmarkParams(b *testing.B) {
for i := 0; i < b.N; i++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := db.QueryRow("select ?, ?, ?, ?", nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
func BenchmarkStmt(b *testing.B) {
st, err := db.Prepare("select ?, ?, ?, ?")
if err != nil {
panic(err)
}
defer st.Close()
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := st.QueryRow(nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
func BenchmarkRows(b *testing.B) {
db.once.Do(makeBench)
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
var t time.Time
r, err := db.Query("select * from bench")
if err != nil {
panic(err)
}
for r.Next() {
if err = r.Scan(&n, &i, &f, &s, &t); err != nil {
panic(err)
}
}
if err = r.Err(); err != nil {
panic(err)
}
}
}
func BenchmarkStmtRows(b *testing.B) {
db.once.Do(makeBench)
st, err := db.Prepare("select * from bench")
if err != nil {
panic(err)
}
defer st.Close()
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
var t time.Time
r, err := st.Query()
if err != nil {
panic(err)
}
for r.Next() {
if err = r.Scan(&n, &i, &f, &s, &t); err != nil {
panic(err)
}
}
if err = r.Err(); err != nil {
panic(err)
}
}
}
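
One piece of the deleted harness worth a second look is the q helper, which rewrites ? placeholders into $1, $2, ... so the same SQL strings can run against PostgreSQL as well as SQLite and MySQL. A standalone sketch of that rewrite, using a made-up query string:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// rewritePlaceholders mirrors the q helper above: each ? becomes $1, $2, ...
// in order of appearance.
func rewritePlaceholders(query string) string {
	qrx := regexp.MustCompile(`\?`)
	n := 0
	return qrx.ReplaceAllStringFunc(query, func(string) string {
		n++
		return "$" + strconv.Itoa(n)
	})
}

func main() {
	// Prints: insert into foo (id, name) values($1,$2)
	fmt.Println(rewritePlaceholders("insert into foo (id, name) values(?,?)"))
}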


@ -1,14 +0,0 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build windows
package sqlite3
/*
#cgo CFLAGS: -I. -fno-stack-check -fno-stack-protector -mno-stack-arg-probe
#cgo windows,386 CFLAGS: -D_USE_32BIT_TIME_T
#cgo LDFLAGS: -lmingwex -lmingw32
*/
import "C"


@ -1,546 +0,0 @@
/*
** 2006 June 7
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This header file defines the SQLite interface for use by
** shared libraries that want to be imported as extensions into
** an SQLite instance. Shared libraries that intend to be loaded
** as extensions by SQLite should #include this file instead of
** sqlite3.h.
*/
#ifndef _SQLITE3EXT_H_
#define _SQLITE3EXT_H_
#include "sqlite3-binding.h"
typedef struct sqlite3_api_routines sqlite3_api_routines;
/*
** The following structure holds pointers to all of the SQLite API
** routines.
**
** WARNING: In order to maintain backwards compatibility, add new
** interfaces to the end of this structure only. If you insert new
** interfaces in the middle of this structure, then older different
** versions of SQLite will not be able to load each other's shared
** libraries!
*/
struct sqlite3_api_routines {
void * (*aggregate_context)(sqlite3_context*,int nBytes);
int (*aggregate_count)(sqlite3_context*);
int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*));
int (*bind_double)(sqlite3_stmt*,int,double);
int (*bind_int)(sqlite3_stmt*,int,int);
int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64);
int (*bind_null)(sqlite3_stmt*,int);
int (*bind_parameter_count)(sqlite3_stmt*);
int (*bind_parameter_index)(sqlite3_stmt*,const char*zName);
const char * (*bind_parameter_name)(sqlite3_stmt*,int);
int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*));
int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*));
int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*);
int (*busy_handler)(sqlite3*,int(*)(void*,int),void*);
int (*busy_timeout)(sqlite3*,int ms);
int (*changes)(sqlite3*);
int (*close)(sqlite3*);
int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,
int eTextRep,const char*));
int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,
int eTextRep,const void*));
const void * (*column_blob)(sqlite3_stmt*,int iCol);
int (*column_bytes)(sqlite3_stmt*,int iCol);
int (*column_bytes16)(sqlite3_stmt*,int iCol);
int (*column_count)(sqlite3_stmt*pStmt);
const char * (*column_database_name)(sqlite3_stmt*,int);
const void * (*column_database_name16)(sqlite3_stmt*,int);
const char * (*column_decltype)(sqlite3_stmt*,int i);
const void * (*column_decltype16)(sqlite3_stmt*,int);
double (*column_double)(sqlite3_stmt*,int iCol);
int (*column_int)(sqlite3_stmt*,int iCol);
sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol);
const char * (*column_name)(sqlite3_stmt*,int);
const void * (*column_name16)(sqlite3_stmt*,int);
const char * (*column_origin_name)(sqlite3_stmt*,int);
const void * (*column_origin_name16)(sqlite3_stmt*,int);
const char * (*column_table_name)(sqlite3_stmt*,int);
const void * (*column_table_name16)(sqlite3_stmt*,int);
const unsigned char * (*column_text)(sqlite3_stmt*,int iCol);
const void * (*column_text16)(sqlite3_stmt*,int iCol);
int (*column_type)(sqlite3_stmt*,int iCol);
sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol);
void * (*commit_hook)(sqlite3*,int(*)(void*),void*);
int (*complete)(const char*sql);
int (*complete16)(const void*sql);
int (*create_collation)(sqlite3*,const char*,int,void*,
int(*)(void*,int,const void*,int,const void*));
int (*create_collation16)(sqlite3*,const void*,int,void*,
int(*)(void*,int,const void*,int,const void*));
int (*create_function)(sqlite3*,const char*,int,int,void*,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*));
int (*create_function16)(sqlite3*,const void*,int,int,void*,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*));
int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*);
int (*data_count)(sqlite3_stmt*pStmt);
sqlite3 * (*db_handle)(sqlite3_stmt*);
int (*declare_vtab)(sqlite3*,const char*);
int (*enable_shared_cache)(int);
int (*errcode)(sqlite3*db);
const char * (*errmsg)(sqlite3*);
const void * (*errmsg16)(sqlite3*);
int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**);
int (*expired)(sqlite3_stmt*);
int (*finalize)(sqlite3_stmt*pStmt);
void (*free)(void*);
void (*free_table)(char**result);
int (*get_autocommit)(sqlite3*);
void * (*get_auxdata)(sqlite3_context*,int);
int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**);
int (*global_recover)(void);
void (*interruptx)(sqlite3*);
sqlite_int64 (*last_insert_rowid)(sqlite3*);
const char * (*libversion)(void);
int (*libversion_number)(void);
void *(*malloc)(int);
char * (*mprintf)(const char*,...);
int (*open)(const char*,sqlite3**);
int (*open16)(const void*,sqlite3**);
int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*);
void (*progress_handler)(sqlite3*,int,int(*)(void*),void*);
void *(*realloc)(void*,int);
int (*reset)(sqlite3_stmt*pStmt);
void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_double)(sqlite3_context*,double);
void (*result_error)(sqlite3_context*,const char*,int);
void (*result_error16)(sqlite3_context*,const void*,int);
void (*result_int)(sqlite3_context*,int);
void (*result_int64)(sqlite3_context*,sqlite_int64);
void (*result_null)(sqlite3_context*);
void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*));
void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_value)(sqlite3_context*,sqlite3_value*);
void * (*rollback_hook)(sqlite3*,void(*)(void*),void*);
int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,
const char*,const char*),void*);
void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*));
char * (*snprintf)(int,char*,const char*,...);
int (*step)(sqlite3_stmt*);
int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,
char const**,char const**,int*,int*,int*);
void (*thread_cleanup)(void);
int (*total_changes)(sqlite3*);
void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*);
int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*);
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,
sqlite_int64),void*);
void * (*user_data)(sqlite3_context*);
const void * (*value_blob)(sqlite3_value*);
int (*value_bytes)(sqlite3_value*);
int (*value_bytes16)(sqlite3_value*);
double (*value_double)(sqlite3_value*);
int (*value_int)(sqlite3_value*);
sqlite_int64 (*value_int64)(sqlite3_value*);
int (*value_numeric_type)(sqlite3_value*);
const unsigned char * (*value_text)(sqlite3_value*);
const void * (*value_text16)(sqlite3_value*);
const void * (*value_text16be)(sqlite3_value*);
const void * (*value_text16le)(sqlite3_value*);
int (*value_type)(sqlite3_value*);
char *(*vmprintf)(const char*,va_list);
/* Added ??? */
int (*overload_function)(sqlite3*, const char *zFuncName, int nArg);
/* Added by 3.3.13 */
int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
int (*clear_bindings)(sqlite3_stmt*);
/* Added by 3.4.1 */
int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,
void (*xDestroy)(void *));
/* Added by 3.5.0 */
int (*bind_zeroblob)(sqlite3_stmt*,int,int);
int (*blob_bytes)(sqlite3_blob*);
int (*blob_close)(sqlite3_blob*);
int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,
int,sqlite3_blob**);
int (*blob_read)(sqlite3_blob*,void*,int,int);
int (*blob_write)(sqlite3_blob*,const void*,int,int);
int (*create_collation_v2)(sqlite3*,const char*,int,void*,
int(*)(void*,int,const void*,int,const void*),
void(*)(void*));
int (*file_control)(sqlite3*,const char*,int,void*);
sqlite3_int64 (*memory_highwater)(int);
sqlite3_int64 (*memory_used)(void);
sqlite3_mutex *(*mutex_alloc)(int);
void (*mutex_enter)(sqlite3_mutex*);
void (*mutex_free)(sqlite3_mutex*);
void (*mutex_leave)(sqlite3_mutex*);
int (*mutex_try)(sqlite3_mutex*);
int (*open_v2)(const char*,sqlite3**,int,const char*);
int (*release_memory)(int);
void (*result_error_nomem)(sqlite3_context*);
void (*result_error_toobig)(sqlite3_context*);
int (*sleep)(int);
void (*soft_heap_limit)(int);
sqlite3_vfs *(*vfs_find)(const char*);
int (*vfs_register)(sqlite3_vfs*,int);
int (*vfs_unregister)(sqlite3_vfs*);
int (*xthreadsafe)(void);
void (*result_zeroblob)(sqlite3_context*,int);
void (*result_error_code)(sqlite3_context*,int);
int (*test_control)(int, ...);
void (*randomness)(int,void*);
sqlite3 *(*context_db_handle)(sqlite3_context*);
int (*extended_result_codes)(sqlite3*,int);
int (*limit)(sqlite3*,int,int);
sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*);
const char *(*sql)(sqlite3_stmt*);
int (*status)(int,int*,int*,int);
int (*backup_finish)(sqlite3_backup*);
sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*);
int (*backup_pagecount)(sqlite3_backup*);
int (*backup_remaining)(sqlite3_backup*);
int (*backup_step)(sqlite3_backup*,int);
const char *(*compileoption_get)(int);
int (*compileoption_used)(const char*);
int (*create_function_v2)(sqlite3*,const char*,int,int,void*,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*),
void(*xDestroy)(void*));
int (*db_config)(sqlite3*,int,...);
sqlite3_mutex *(*db_mutex)(sqlite3*);
int (*db_status)(sqlite3*,int,int*,int*,int);
int (*extended_errcode)(sqlite3*);
void (*log)(int,const char*,...);
sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64);
const char *(*sourceid)(void);
int (*stmt_status)(sqlite3_stmt*,int,int);
int (*strnicmp)(const char*,const char*,int);
int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*);
int (*wal_autocheckpoint)(sqlite3*,int);
int (*wal_checkpoint)(sqlite3*,const char*);
void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*);
int (*blob_reopen)(sqlite3_blob*,sqlite3_int64);
int (*vtab_config)(sqlite3*,int op,...);
int (*vtab_on_conflict)(sqlite3*);
/* Version 3.7.16 and later */
int (*close_v2)(sqlite3*);
const char *(*db_filename)(sqlite3*,const char*);
int (*db_readonly)(sqlite3*,const char*);
int (*db_release_memory)(sqlite3*);
const char *(*errstr)(int);
int (*stmt_busy)(sqlite3_stmt*);
int (*stmt_readonly)(sqlite3_stmt*);
int (*stricmp)(const char*,const char*);
int (*uri_boolean)(const char*,const char*,int);
sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64);
const char *(*uri_parameter)(const char*,const char*);
char *(*vsnprintf)(int,char*,const char*,va_list);
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
/* Version 3.8.7 and later */
int (*auto_extension)(void(*)(void));
int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
void(*)(void*));
int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
void(*)(void*),unsigned char);
int (*cancel_auto_extension)(void(*)(void));
int (*load_extension)(sqlite3*,const char*,const char*,char**);
void *(*malloc64)(sqlite3_uint64);
sqlite3_uint64 (*msize)(void*);
void *(*realloc64)(void*,sqlite3_uint64);
void (*reset_auto_extension)(void);
void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
void(*)(void*));
void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
void(*)(void*), unsigned char);
int (*strglob)(const char*,const char*);
/* Version 3.8.11 and later */
sqlite3_value *(*value_dup)(const sqlite3_value*);
void (*value_free)(sqlite3_value*);
int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
/* Version 3.9.0 and later */
unsigned int (*value_subtype)(sqlite3_value*);
void (*result_subtype)(sqlite3_context*,unsigned int);
/* Version 3.10.0 and later */
int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
int (*strlike)(const char*,const char*,unsigned int);
int (*db_cacheflush)(sqlite3*);
/* Version 3.12.0 and later */
int (*system_errno)(sqlite3*);
};
/*
** The following macros redefine the API routines so that they are
** redirected through the global sqlite3_api structure.
**
** This header file is also used by the loadext.c source file
** (part of the main SQLite library - not an extension) so that
** it can get access to the sqlite3_api_routines structure
** definition. But the main library does not want to redefine
** the API. So the redefinition macros are only valid if the
** SQLITE_CORE macro is undefined.
*/
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
#define sqlite3_aggregate_context sqlite3_api->aggregate_context
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_aggregate_count sqlite3_api->aggregate_count
#endif
#define sqlite3_bind_blob sqlite3_api->bind_blob
#define sqlite3_bind_double sqlite3_api->bind_double
#define sqlite3_bind_int sqlite3_api->bind_int
#define sqlite3_bind_int64 sqlite3_api->bind_int64
#define sqlite3_bind_null sqlite3_api->bind_null
#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count
#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index
#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name
#define sqlite3_bind_text sqlite3_api->bind_text
#define sqlite3_bind_text16 sqlite3_api->bind_text16
#define sqlite3_bind_value sqlite3_api->bind_value
#define sqlite3_busy_handler sqlite3_api->busy_handler
#define sqlite3_busy_timeout sqlite3_api->busy_timeout
#define sqlite3_changes sqlite3_api->changes
#define sqlite3_close sqlite3_api->close
#define sqlite3_collation_needed sqlite3_api->collation_needed
#define sqlite3_collation_needed16 sqlite3_api->collation_needed16
#define sqlite3_column_blob sqlite3_api->column_blob
#define sqlite3_column_bytes sqlite3_api->column_bytes
#define sqlite3_column_bytes16 sqlite3_api->column_bytes16
#define sqlite3_column_count sqlite3_api->column_count
#define sqlite3_column_database_name sqlite3_api->column_database_name
#define sqlite3_column_database_name16 sqlite3_api->column_database_name16
#define sqlite3_column_decltype sqlite3_api->column_decltype
#define sqlite3_column_decltype16 sqlite3_api->column_decltype16
#define sqlite3_column_double sqlite3_api->column_double
#define sqlite3_column_int sqlite3_api->column_int
#define sqlite3_column_int64 sqlite3_api->column_int64
#define sqlite3_column_name sqlite3_api->column_name
#define sqlite3_column_name16 sqlite3_api->column_name16
#define sqlite3_column_origin_name sqlite3_api->column_origin_name
#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16
#define sqlite3_column_table_name sqlite3_api->column_table_name
#define sqlite3_column_table_name16 sqlite3_api->column_table_name16
#define sqlite3_column_text sqlite3_api->column_text
#define sqlite3_column_text16 sqlite3_api->column_text16
#define sqlite3_column_type sqlite3_api->column_type
#define sqlite3_column_value sqlite3_api->column_value
#define sqlite3_commit_hook sqlite3_api->commit_hook
#define sqlite3_complete sqlite3_api->complete
#define sqlite3_complete16 sqlite3_api->complete16
#define sqlite3_create_collation sqlite3_api->create_collation
#define sqlite3_create_collation16 sqlite3_api->create_collation16
#define sqlite3_create_function sqlite3_api->create_function
#define sqlite3_create_function16 sqlite3_api->create_function16
#define sqlite3_create_module sqlite3_api->create_module
#define sqlite3_create_module_v2 sqlite3_api->create_module_v2
#define sqlite3_data_count sqlite3_api->data_count
#define sqlite3_db_handle sqlite3_api->db_handle
#define sqlite3_declare_vtab sqlite3_api->declare_vtab
#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache
#define sqlite3_errcode sqlite3_api->errcode
#define sqlite3_errmsg sqlite3_api->errmsg
#define sqlite3_errmsg16 sqlite3_api->errmsg16
#define sqlite3_exec sqlite3_api->exec
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_expired sqlite3_api->expired
#endif
#define sqlite3_finalize sqlite3_api->finalize
#define sqlite3_free sqlite3_api->free
#define sqlite3_free_table sqlite3_api->free_table
#define sqlite3_get_autocommit sqlite3_api->get_autocommit
#define sqlite3_get_auxdata sqlite3_api->get_auxdata
#define sqlite3_get_table sqlite3_api->get_table
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_global_recover sqlite3_api->global_recover
#endif
#define sqlite3_interrupt sqlite3_api->interruptx
#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid
#define sqlite3_libversion sqlite3_api->libversion
#define sqlite3_libversion_number sqlite3_api->libversion_number
#define sqlite3_malloc sqlite3_api->malloc
#define sqlite3_mprintf sqlite3_api->mprintf
#define sqlite3_open sqlite3_api->open
#define sqlite3_open16 sqlite3_api->open16
#define sqlite3_prepare sqlite3_api->prepare
#define sqlite3_prepare16 sqlite3_api->prepare16
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
#define sqlite3_profile sqlite3_api->profile
#define sqlite3_progress_handler sqlite3_api->progress_handler
#define sqlite3_realloc sqlite3_api->realloc
#define sqlite3_reset sqlite3_api->reset
#define sqlite3_result_blob sqlite3_api->result_blob
#define sqlite3_result_double sqlite3_api->result_double
#define sqlite3_result_error sqlite3_api->result_error
#define sqlite3_result_error16 sqlite3_api->result_error16
#define sqlite3_result_int sqlite3_api->result_int
#define sqlite3_result_int64 sqlite3_api->result_int64
#define sqlite3_result_null sqlite3_api->result_null
#define sqlite3_result_text sqlite3_api->result_text
#define sqlite3_result_text16 sqlite3_api->result_text16
#define sqlite3_result_text16be sqlite3_api->result_text16be
#define sqlite3_result_text16le sqlite3_api->result_text16le
#define sqlite3_result_value sqlite3_api->result_value
#define sqlite3_rollback_hook sqlite3_api->rollback_hook
#define sqlite3_set_authorizer sqlite3_api->set_authorizer
#define sqlite3_set_auxdata sqlite3_api->set_auxdata
#define sqlite3_snprintf sqlite3_api->snprintf
#define sqlite3_step sqlite3_api->step
#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata
#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup
#define sqlite3_total_changes sqlite3_api->total_changes
#define sqlite3_trace sqlite3_api->trace
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings
#endif
#define sqlite3_update_hook sqlite3_api->update_hook
#define sqlite3_user_data sqlite3_api->user_data
#define sqlite3_value_blob sqlite3_api->value_blob
#define sqlite3_value_bytes sqlite3_api->value_bytes
#define sqlite3_value_bytes16 sqlite3_api->value_bytes16
#define sqlite3_value_double sqlite3_api->value_double
#define sqlite3_value_int sqlite3_api->value_int
#define sqlite3_value_int64 sqlite3_api->value_int64
#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type
#define sqlite3_value_text sqlite3_api->value_text
#define sqlite3_value_text16 sqlite3_api->value_text16
#define sqlite3_value_text16be sqlite3_api->value_text16be
#define sqlite3_value_text16le sqlite3_api->value_text16le
#define sqlite3_value_type sqlite3_api->value_type
#define sqlite3_vmprintf sqlite3_api->vmprintf
#define sqlite3_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_overload_function sqlite3_api->overload_function
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
#define sqlite3_clear_bindings sqlite3_api->clear_bindings
#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob
#define sqlite3_blob_bytes sqlite3_api->blob_bytes
#define sqlite3_blob_close sqlite3_api->blob_close
#define sqlite3_blob_open sqlite3_api->blob_open
#define sqlite3_blob_read sqlite3_api->blob_read
#define sqlite3_blob_write sqlite3_api->blob_write
#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2
#define sqlite3_file_control sqlite3_api->file_control
#define sqlite3_memory_highwater sqlite3_api->memory_highwater
#define sqlite3_memory_used sqlite3_api->memory_used
#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc
#define sqlite3_mutex_enter sqlite3_api->mutex_enter
#define sqlite3_mutex_free sqlite3_api->mutex_free
#define sqlite3_mutex_leave sqlite3_api->mutex_leave
#define sqlite3_mutex_try sqlite3_api->mutex_try
#define sqlite3_open_v2 sqlite3_api->open_v2
#define sqlite3_release_memory sqlite3_api->release_memory
#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem
#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig
#define sqlite3_sleep sqlite3_api->sleep
#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit
#define sqlite3_vfs_find sqlite3_api->vfs_find
#define sqlite3_vfs_register sqlite3_api->vfs_register
#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister
#define sqlite3_threadsafe sqlite3_api->xthreadsafe
#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob
#define sqlite3_result_error_code sqlite3_api->result_error_code
#define sqlite3_test_control sqlite3_api->test_control
#define sqlite3_randomness sqlite3_api->randomness
#define sqlite3_context_db_handle sqlite3_api->context_db_handle
#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes
#define sqlite3_limit sqlite3_api->limit
#define sqlite3_next_stmt sqlite3_api->next_stmt
#define sqlite3_sql sqlite3_api->sql
#define sqlite3_status sqlite3_api->status
#define sqlite3_backup_finish sqlite3_api->backup_finish
#define sqlite3_backup_init sqlite3_api->backup_init
#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount
#define sqlite3_backup_remaining sqlite3_api->backup_remaining
#define sqlite3_backup_step sqlite3_api->backup_step
#define sqlite3_compileoption_get sqlite3_api->compileoption_get
#define sqlite3_compileoption_used sqlite3_api->compileoption_used
#define sqlite3_create_function_v2 sqlite3_api->create_function_v2
#define sqlite3_db_config sqlite3_api->db_config
#define sqlite3_db_mutex sqlite3_api->db_mutex
#define sqlite3_db_status sqlite3_api->db_status
#define sqlite3_extended_errcode sqlite3_api->extended_errcode
#define sqlite3_log sqlite3_api->log
#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64
#define sqlite3_sourceid sqlite3_api->sourceid
#define sqlite3_stmt_status sqlite3_api->stmt_status
#define sqlite3_strnicmp sqlite3_api->strnicmp
#define sqlite3_unlock_notify sqlite3_api->unlock_notify
#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint
#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint
#define sqlite3_wal_hook sqlite3_api->wal_hook
#define sqlite3_blob_reopen sqlite3_api->blob_reopen
#define sqlite3_vtab_config sqlite3_api->vtab_config
#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict
/* Version 3.7.16 and later */
#define sqlite3_close_v2 sqlite3_api->close_v2
#define sqlite3_db_filename sqlite3_api->db_filename
#define sqlite3_db_readonly sqlite3_api->db_readonly
#define sqlite3_db_release_memory sqlite3_api->db_release_memory
#define sqlite3_errstr sqlite3_api->errstr
#define sqlite3_stmt_busy sqlite3_api->stmt_busy
#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly
#define sqlite3_stricmp sqlite3_api->stricmp
#define sqlite3_uri_boolean sqlite3_api->uri_boolean
#define sqlite3_uri_int64 sqlite3_api->uri_int64
#define sqlite3_uri_parameter sqlite3_api->uri_parameter
#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
/* Version 3.8.7 and later */
#define sqlite3_auto_extension sqlite3_api->auto_extension
#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
#define sqlite3_bind_text64 sqlite3_api->bind_text64
#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
#define sqlite3_load_extension sqlite3_api->load_extension
#define sqlite3_malloc64 sqlite3_api->malloc64
#define sqlite3_msize sqlite3_api->msize
#define sqlite3_realloc64 sqlite3_api->realloc64
#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
#define sqlite3_result_blob64 sqlite3_api->result_blob64
#define sqlite3_result_text64 sqlite3_api->result_text64
#define sqlite3_strglob sqlite3_api->strglob
/* Version 3.8.11 and later */
#define sqlite3_value_dup sqlite3_api->value_dup
#define sqlite3_value_free sqlite3_api->value_free
#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
/* Version 3.9.0 and later */
#define sqlite3_value_subtype sqlite3_api->value_subtype
#define sqlite3_result_subtype sqlite3_api->result_subtype
/* Version 3.10.0 and later */
#define sqlite3_status64 sqlite3_api->status64
#define sqlite3_strlike sqlite3_api->strlike
#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush
/* Version 3.12.0 and later */
#define sqlite3_system_errno sqlite3_api->system_errno
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
/* This case when the file really is being compiled as a loadable
** extension */
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
# define SQLITE_EXTENSION_INIT3 \
extern const sqlite3_api_routines *sqlite3_api;
#else
/* This case when the file is being statically linked into the
** application */
# define SQLITE_EXTENSION_INIT1 /*no-op*/
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
# define SQLITE_EXTENSION_INIT3 /*no-op*/
#endif
#endif /* _SQLITE3EXT_H_ */