forked from sm/vain

vendor deps

Change-Id: Ia5385cbe6b9d6c9ad6065b7d2c402ebe164246d0
This commit is contained in:
Stephen McQuay 2016-06-04 10:47:11 -07:00
parent 435e28966a
commit 4fb9e35a17
No known key found for this signature in database
GPG Key ID: 1ABF428F71BAFC3D
62 changed files with 210833 additions and 0 deletions

23
vendor/github.com/elazarl/go-bindata-assetfs/LICENSE generated vendored Normal file

@@ -0,0 +1,23 @@
Copyright (c) 2014, Elazar Leibovich
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

46
vendor/github.com/elazarl/go-bindata-assetfs/README.md generated vendored Normal file

@@ -0,0 +1,46 @@
# go-bindata-assetfs
Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.
[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)
### Installation
Install with
$ go get github.com/jteeuwen/go-bindata/...
$ go get github.com/elazarl/go-bindata-assetfs/...
### Creating embedded data
Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage;
instead of running `go-bindata`, run `go-bindata-assetfs`.
The tool will create a `bindata_assetfs.go` file, which contains the embedded data.
A typical use case is
$ go-bindata-assetfs data/...
### Using assetFS in your code
The generated file provides an `assetFS()` function that returns an `http.FileSystem`
wrapping the embedded files. What you usually want to do is:
http.Handle("/", http.FileServer(assetFS()))
This registers a handler that serves the embedded files (pair it with `http.ListenAndServe` to run the server).
## Without running the binary tool
You can always just run the `go-bindata` tool and then use
import "github.com/elazarl/go-bindata-assetfs"
...
http.Handle("/",
http.FileServer(
&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"}))
to serve files embedded from the `data` directory.
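For reference, a minimal but complete program built from the snippet above could look like the sketch below. It assumes `go-bindata-assetfs data/...` has already generated `bindata_assetfs.go` (and therefore `assetFS()`) in the same package; the `:8080` address is only illustrative.

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// assetFS() comes from the generated bindata_assetfs.go in this package.
	http.Handle("/", http.FileServer(assetFS()))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```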

158
vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go generated vendored Normal file

@@ -0,0 +1,158 @@
package assetfs
import (
"bytes"
"errors"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"time"
)
var (
defaultFileTimestamp = time.Now()
)
// FakeFile implements os.FileInfo interface for a given path and size
type FakeFile struct {
// Path is the path of this file
Path string
// Dir marks whether the path is a directory
Dir bool
// Len is the length of the fake file, zero if it is a directory
Len int64
// Timestamp is the ModTime of this file
Timestamp time.Time
}
func (f *FakeFile) Name() string {
_, name := filepath.Split(f.Path)
return name
}
func (f *FakeFile) Mode() os.FileMode {
mode := os.FileMode(0644)
if f.Dir {
return mode | os.ModeDir
}
return mode
}
func (f *FakeFile) ModTime() time.Time {
return f.Timestamp
}
func (f *FakeFile) Size() int64 {
return f.Len
}
func (f *FakeFile) IsDir() bool {
return f.Mode().IsDir()
}
func (f *FakeFile) Sys() interface{} {
return nil
}
// AssetFile implements the http.File interface for a non-directory file with content
type AssetFile struct {
*bytes.Reader
io.Closer
FakeFile
}
func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile {
if timestamp.IsZero() {
timestamp = defaultFileTimestamp
}
return &AssetFile{
bytes.NewReader(content),
ioutil.NopCloser(nil),
FakeFile{name, false, int64(len(content)), timestamp}}
}
func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, errors.New("not a directory")
}
func (f *AssetFile) Size() int64 {
return f.FakeFile.Size()
}
func (f *AssetFile) Stat() (os.FileInfo, error) {
return f, nil
}
// AssetDirectory implements http.File interface for a directory
type AssetDirectory struct {
AssetFile
ChildrenRead int
Children []os.FileInfo
}
func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory {
fileinfos := make([]os.FileInfo, 0, len(children))
for _, child := range children {
_, err := fs.AssetDir(filepath.Join(name, child))
fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}})
}
return &AssetDirectory{
AssetFile{
bytes.NewReader(nil),
ioutil.NopCloser(nil),
FakeFile{name, true, 0, time.Time{}},
},
0,
fileinfos}
}
func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) {
if count <= 0 {
return f.Children, nil
}
if f.ChildrenRead+count > len(f.Children) {
count = len(f.Children) - f.ChildrenRead
}
rv := f.Children[f.ChildrenRead : f.ChildrenRead+count]
f.ChildrenRead += count
return rv, nil
}
func (f *AssetDirectory) Stat() (os.FileInfo, error) {
return f, nil
}
// AssetFS implements http.FileSystem, allowing
// embedded files to be served from the net/http package.
type AssetFS struct {
// Asset should return the content of the file at path, if it exists
Asset func(path string) ([]byte, error)
// AssetDir should return a list of the files in the path
AssetDir func(path string) ([]string, error)
// AssetInfo should return the os.FileInfo for the file at path, if it exists
AssetInfo func(path string) (os.FileInfo, error)
// Prefix is prepended to the request path before lookup
Prefix string
}
func (fs *AssetFS) Open(name string) (http.File, error) {
name = path.Join(fs.Prefix, name)
if len(name) > 0 && name[0] == '/' {
name = name[1:]
}
if b, err := fs.Asset(name); err == nil {
timestamp := defaultFileTimestamp
if info, err := fs.AssetInfo(name); err == nil {
timestamp = info.ModTime()
}
return NewAssetFile(name, b, timestamp), nil
}
if children, err := fs.AssetDir(name); err == nil {
return NewAssetDirectory(name, children, fs), nil
} else {
return nil, err
}
}
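As a rough sketch of how `AssetFS` satisfies `http.FileSystem`, the example below wires it to hand-written `Asset`/`AssetDir`/`AssetInfo` closures over an in-memory map instead of go-bindata output; the file name `hello.txt` and its contents are invented for illustration.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/elazarl/go-bindata-assetfs"
)

func main() {
	// Hypothetical in-memory "assets" standing in for go-bindata output.
	files := map[string][]byte{"hello.txt": []byte("hello, world\n")}

	fs := &assetfs.AssetFS{
		Asset: func(path string) ([]byte, error) {
			if b, ok := files[path]; ok {
				return b, nil
			}
			return nil, fmt.Errorf("%s not found", path)
		},
		AssetDir: func(path string) ([]string, error) {
			if path == "" {
				return []string{"hello.txt"}, nil
			}
			return nil, fmt.Errorf("%s not found", path)
		},
		// AssetInfo may fail; Open then falls back to a default timestamp.
		AssetInfo: func(path string) (os.FileInfo, error) {
			return nil, fmt.Errorf("no info for %s", path)
		},
	}

	// Open joins the Prefix, trims the leading '/', and tries Asset first.
	f, err := fs.Open("/hello.txt")
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(f)
	fmt.Print(string(b))
}
```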

13
vendor/github.com/elazarl/go-bindata-assetfs/doc.go generated vendored Normal file

@@ -0,0 +1,13 @@
// Package assetfs allows packages to serve static content embedded
// with the go-bindata tool via the standard net/http package.
//
// See https://github.com/jteeuwen/go-bindata for more information
// about embedding binary data with go-bindata.
//
// Usage example, after running
// $ go-bindata data/...
// use:
// http.Handle("/",
// http.FileServer(
// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
package assetfs


@@ -0,0 +1,97 @@
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"strings"
)
const bindatafile = "bindata.go"
func isDebug(args []string) bool {
flagset := flag.NewFlagSet("", flag.ContinueOnError)
debug := flagset.Bool("debug", false, "")
debugArgs := make([]string, 0)
for _, arg := range args {
if strings.HasPrefix(arg, "-debug") {
debugArgs = append(debugArgs, arg)
}
}
flagset.Parse(debugArgs)
if debug == nil {
return false
}
return *debug
}
func main() {
if _, err := exec.LookPath("go-bindata"); err != nil {
fmt.Println("Cannot find go-bindata executable in path")
fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...")
os.Exit(1)
}
cmd := exec.Command("go-bindata", os.Args[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
os.Exit(1)
}
in, err := os.Open(bindatafile)
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err)
return
}
out, err := os.Create("bindata_assetfs.go")
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err)
return
}
debug := isDebug(os.Args[1:])
r := bufio.NewReader(in)
done := false
for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {
if !isPrefix {
line = append(line, '\n')
}
if _, err := out.Write(line); err != nil {
fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err)
return
}
if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) {
if debug {
fmt.Fprintln(out, "\t\"net/http\"")
} else {
fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"")
}
done = true
}
}
if debug {
fmt.Fprintln(out, `
func assetFS() http.FileSystem {
for k := range _bintree.Children {
return http.Dir(k)
}
panic("unreachable")
}`)
} else {
fmt.Fprintln(out, `
func assetFS() *assetfs.AssetFS {
for k := range _bintree.Children {
return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: k}
}
panic("unreachable")
}`)
}
// Close files BEFORE remove calls (don't use defer).
in.Close()
out.Close()
if err := os.Remove(bindatafile); err != nil {
fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err)
}
}

24
vendor/github.com/jmoiron/sqlx/.gitignore generated vendored Normal file

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
tags
environ

23
vendor/github.com/jmoiron/sqlx/LICENSE generated vendored Normal file

@@ -0,0 +1,23 @@
Copyright (c) 2013, Jason Moiron
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

185
vendor/github.com/jmoiron/sqlx/README.md generated vendored Normal file

@@ -0,0 +1,185 @@
# sqlx
[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
sqlx is a library which provides a set of extensions on top of Go's standard
`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`,
et al. all leave the underlying interfaces untouched, so that their interfaces
are a superset of the standard ones. This makes it relatively painless to
integrate existing codebases using `database/sql` with sqlx.
Major additional concepts are:
* Marshal rows into structs (with embedded struct support), maps, and slices
* Named parameter support including prepared statements
* `Get` and `Select` to go quickly from query to struct/slice
In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
explains how to use `database/sql` along with sqlx.
## Recent Changes
* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
This breaks backwards compatibility, but it's in a way that is trivially fixable
(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
active development currently.
More importantly, [golang bug #13905](https://github.com/golang/go/issues/13905)
makes `types.JSONText` and `types.GzippedText` _potentially unsafe_, **especially**
when used with common auto-scan sqlx idioms like `Select` and `Get`.
### Backwards Compatibility
There is no Go1-like promise of absolute stability, but I take the issue seriously
and will maintain the library in a compatible state unless vital bugs prevent me
from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
a wider API cleanup was done at the time of fixing. It's possible this will happen
in future; if it does, a git tag will be provided for users requiring the old
behavior to continue to use it until such a time as they can migrate.
## install
go get github.com/jmoiron/sqlx
## issues
Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
`Columns()` does not fully qualify column names in queries like:
```sql
SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
```
making a struct or map destination ambiguous. Use `AS` in your queries
to give columns distinct names, `rows.Scan` to scan them manually, or
`SliceScan` to get a slice of results.
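A small sketch of the `SliceScan` escape hatch mentioned above (the `foos` table and the connection string are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
	if err != nil {
		log.Fatalln(err)
	}

	rows, err := db.Queryx(`SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id`)
	if err != nil {
		log.Fatalln(err)
	}
	defer rows.Close()

	for rows.Next() {
		// SliceScan returns one []interface{} entry per column, in query
		// order, so duplicate column names are not a problem.
		cols, err := rows.SliceScan()
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Println(cols)
	}
}
```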
## usage
Below is an example which shows some common use cases for sqlx. Check
[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
usage.
```go
package main
import (
_ "github.com/lib/pq"
"database/sql"
"fmt"
"github.com/jmoiron/sqlx"
"log"
)
var schema = `
CREATE TABLE person (
first_name text,
last_name text,
email text
);
CREATE TABLE place (
country text,
city text NULL,
telcode integer
)`
type Person struct {
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
Email string
}
type Place struct {
Country string
City sql.NullString
TelCode int
}
func main() {
// this Pings the database trying to connect; check err for connection failures
// use sqlx.Open() for sql.Open() semantics
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
if err != nil {
log.Fatalln(err)
}
// exec the schema or fail; multi-statement Exec behavior varies between
// database drivers; pq will exec them all, sqlite3 won't, ymmv
db.MustExec(schema)
tx := db.MustBegin()
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
// Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
tx.Commit()
// Query the database, storing results in a []Person
people := []Person{}
db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
jason, john := people[0], people[1]
fmt.Printf("%#v\n%#v", jason, john)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
// You can also get a single result, a la QueryRow
jason = Person{}
err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
fmt.Printf("%#v\n", jason)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
places := []Place{}
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
fmt.Println(err)
return
}
usa, singsing, honkers := places[0], places[1], places[2]
fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Loop through rows using only one struct
place := Place{}
rows, err := db.Queryx("SELECT * FROM place")
for rows.Next() {
err := rows.StructScan(&place)
if err != nil {
log.Fatalln(err)
}
fmt.Printf("%#v\n", place)
}
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Named queries use `:name` as the bindvar. Bindvars are converted automatically,
// taking into account the dbtype based on the driverName passed to sqlx.Open/Connect
_, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
map[string]interface{}{
"first": "Bin",
"last": "Smuth",
"email": "bensmith@allblacks.nz",
})
// Selects Mr. Smith from the database
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
// Named queries can also use structs. Their bind names follow the same rules
// as the name -> db mapping, so struct fields are lowercased and the `db` tag
// is taken into consideration.
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
}
```

186
vendor/github.com/jmoiron/sqlx/bind.go generated vendored Normal file

@@ -0,0 +1,186 @@
package sqlx
import (
"bytes"
"errors"
"reflect"
"strconv"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Bindvar types supported by Rebind, BindMap and BindStruct.
const (
UNKNOWN = iota
QUESTION
DOLLAR
NAMED
)
// BindType returns the bindvar type for a database, given its driver name.
func BindType(driverName string) int {
switch driverName {
case "postgres", "pgx":
return DOLLAR
case "mysql":
return QUESTION
case "sqlite3":
return QUESTION
case "oci8", "ora", "goracle":
return NAMED
}
return UNKNOWN
}
// FIXME: this should be able to be tolerant of escaped ?'s in queries without
// losing much speed, and should be, to avoid confusion.
// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
func Rebind(bindType int, query string) string {
switch bindType {
case QUESTION, UNKNOWN:
return query
}
qb := []byte(query)
// Add space enough for 10 params before we have to allocate
rqb := make([]byte, 0, len(qb)+10)
j := 1
for _, b := range qb {
if b == '?' {
switch bindType {
case DOLLAR:
rqb = append(rqb, '$')
case NAMED:
rqb = append(rqb, ':', 'a', 'r', 'g')
}
for _, b := range strconv.Itoa(j) {
rqb = append(rqb, byte(b))
}
j++
} else {
rqb = append(rqb, b)
}
}
return string(rqb)
}
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
// much simpler and should be more resistant to odd unicode, but it is twice as
// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
// problems arise with its somewhat naive handling of unicode.
func rebindBuff(bindType int, query string) string {
if bindType != DOLLAR {
return query
}
b := make([]byte, 0, len(query))
rqb := bytes.NewBuffer(b)
j := 1
for _, r := range query {
if r == '?' {
rqb.WriteRune('$')
rqb.WriteString(strconv.Itoa(j))
j++
} else {
rqb.WriteRune(r)
}
}
return rqb.String()
}
// In expands slice values in args, returning the modified query string
// and a new arg list that can be executed by a database. The `query` should
// use the `?` bindVar. The return value uses the `?` bindVar.
func In(query string, args ...interface{}) (string, []interface{}, error) {
// argMeta stores reflect.Value and length for slices and
// the value itself for non-slice arguments
type argMeta struct {
v reflect.Value
i interface{}
length int
}
var flatArgsCount int
var anySlices bool
meta := make([]argMeta, len(args))
for i, arg := range args {
v := reflect.ValueOf(arg)
t := reflectx.Deref(v.Type())
if t.Kind() == reflect.Slice {
meta[i].length = v.Len()
meta[i].v = v
anySlices = true
flatArgsCount += meta[i].length
if meta[i].length == 0 {
return "", nil, errors.New("empty slice passed to 'in' query")
}
} else {
meta[i].i = arg
flatArgsCount++
}
}
// don't do any parsing if there aren't any slices; note that this means
// some errors that we might have caught below will not be returned.
if !anySlices {
return query, args, nil
}
newArgs := make([]interface{}, 0, flatArgsCount)
var arg, offset int
var buf bytes.Buffer
for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
if arg >= len(meta) {
// if an argument wasn't passed, lets return an error; this is
// not actually how database/sql Exec/Query works, but since we are
// creating an argument list programmatically, we want to be able
// to catch these programmer errors earlier.
return "", nil, errors.New("number of bindVars exceeds arguments")
}
argMeta := meta[arg]
arg++
// not a slice, continue.
// our questionmark will either be written before the next expansion
// of a slice or after the loop when writing the rest of the query
if argMeta.length == 0 {
offset = offset + i + 1
newArgs = append(newArgs, argMeta.i)
continue
}
// write everything up to and including our ? character
buf.WriteString(query[:offset+i+1])
newArgs = append(newArgs, argMeta.v.Index(0).Interface())
for si := 1; si < argMeta.length; si++ {
buf.WriteString(", ?")
newArgs = append(newArgs, argMeta.v.Index(si).Interface())
}
// slice the query and reset the offset. this avoids some bookkeeping for
// the write after the loop
query = query[offset+i+1:]
offset = 0
}
buf.WriteString(query)
if arg < len(meta) {
return "", nil, errors.New("number of bindVars less than number arguments")
}
return buf.String(), newArgs, nil
}
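To illustrate how `In` and `Rebind` are typically combined, here is a rough sketch; the `users` table, `level` column, and connection string are invented for the example, and a Postgres driver is assumed.

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
	if err != nil {
		log.Fatalln(err)
	}

	// In expands the slice into (?, ?, ?) and flattens the args.
	query, args, err := sqlx.In("SELECT * FROM users WHERE level IN (?)", []int{4, 6, 7})
	if err != nil {
		log.Fatalln(err)
	}

	// In always returns a '?' query; Rebind converts it to the driver's
	// bindvar style ($1, $2, $3 for postgres).
	query = db.Rebind(query)

	rows, err := db.Queryx(query, args...)
	if err != nil {
		log.Fatalln(err)
	}
	defer rows.Close()
}
```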

12
vendor/github.com/jmoiron/sqlx/doc.go generated vendored Normal file

@@ -0,0 +1,12 @@
// Package sqlx provides general purpose extensions to database/sql.
//
// It is intended to seamlessly wrap database/sql and provide convenience
// methods which are useful in the development of database driven applications.
// None of the underlying database/sql methods are changed. Instead all extended
// behavior is implemented through new methods defined on wrapper types.
//
// Additions include scanning into structs, named query support, rebinding
// queries for different drivers, convenient shorthands for common error handling
// and more.
//
package sqlx

336
vendor/github.com/jmoiron/sqlx/named.go generated vendored Normal file

@@ -0,0 +1,336 @@
package sqlx
// Named Query Support
//
// * BindMap - bind query bindvars to map/struct args
// * NamedExec, NamedQuery - named query w/ struct or map
// * NamedStmt - a pre-compiled named query which is a prepared statement
//
// Internal Interfaces:
//
// * compileNamedQuery - rebind a named query, returning a query and list of names
// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
//
import (
"database/sql"
"errors"
"fmt"
"reflect"
"strconv"
"unicode"
"github.com/jmoiron/sqlx/reflectx"
)
// NamedStmt is a prepared statement that executes named queries. Prepare it
// how you would execute a NamedQuery, but pass in a struct or map when executing.
type NamedStmt struct {
Params []string
QueryString string
Stmt *Stmt
}
// Close closes the named statement.
func (n *NamedStmt) Close() error {
return n.Stmt.Close()
}
// Exec executes a named statement using the struct passed.
func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return *new(sql.Result), err
}
return n.Stmt.Exec(args...)
}
// Query executes a named statement using the struct argument, returning rows.
func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return nil, err
}
return n.Stmt.Query(args...)
}
// QueryRow executes a named statement against the database. Because sqlx cannot
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
// returns a *sqlx.Row instead.
func (n *NamedStmt) QueryRow(arg interface{}) *Row {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return &Row{err: err}
}
return n.Stmt.QueryRowx(args...)
}
// MustExec execs a NamedStmt, panicking on error
func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
res, err := n.Exec(arg)
if err != nil {
panic(err)
}
return res
}
// Queryx using this NamedStmt
func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
r, err := n.Query(arg)
if err != nil {
return nil, err
}
return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
}
// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
// an alias for QueryRow.
func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
return n.QueryRow(arg)
}
// Select using this NamedStmt
func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
rows, err := n.Queryx(arg)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// Get using this NamedStmt
func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
r := n.QueryRowx(arg)
return r.scanAny(dest, false)
}
// Unsafe creates an unsafe version of the NamedStmt
func (n *NamedStmt) Unsafe() *NamedStmt {
r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
r.Stmt.unsafe = true
return r
}
// A union interface of preparer and binder, required to be able to prepare
// named statements (as the bindtype must be determined).
type namedPreparer interface {
Preparer
binder
}
func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
bindType := BindType(p.DriverName())
q, args, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return nil, err
}
stmt, err := Preparex(p, q)
if err != nil {
return nil, err
}
return &NamedStmt{
QueryString: q,
Params: args,
Stmt: stmt,
}, nil
}
func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMapArgs(names, maparg)
}
return bindArgs(names, arg, m)
}
// private interface to generate a list of interfaces from a given struct
// type, given a list of names to pull out of the struct. Used by public
// BindStruct interface.
func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
// grab the indirected value of arg
v := reflect.ValueOf(arg)
for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
v = v.Elem()
}
fields := m.TraversalsByName(v.Type(), names)
for i, t := range fields {
if len(t) == 0 {
return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
}
val := reflectx.FieldByIndexesReadOnly(v, t)
arglist = append(arglist, val.Interface())
}
return arglist, nil
}
// like bindArgs, but for maps.
func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
for _, name := range names {
val, ok := arg[name]
if !ok {
return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
}
arglist = append(arglist, val)
}
return arglist, nil
}
// bindStruct binds a named parameter query with fields from a struct argument.
// The rules for binding field names to parameter names follow the same
// conventions as for StructScan, including obeying the `db` struct tags.
func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindArgs(names, arg, m)
if err != nil {
return "", []interface{}{}, err
}
return bound, arglist, nil
}
// bindMap binds a named parameter query with a map of arguments.
func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindMapArgs(names, args)
return bound, arglist, err
}
// -- Compilation of Named Queries
// Allow digits and letters in bind params; additionally runes are
// checked against underscores, meaning that bind params can be
// alphanumeric with underscores. Mind the difference between unicode
// digits and numbers, where '5' is a digit but '五' is not.
var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
// FIXME: this function isn't safe for unicode named params, as a failing test
// can testify. This is not a regression but a failure of the original code
// as well. It should be modified to range over runes in a string rather than
// bytes, even though this is less convenient and slower. Hopefully the
// addition of the prepared NamedStmt (which will only do this once) will make
// up for the slightly slower ad-hoc NamedExec/NamedQuery.
// compile a NamedQuery into an unbound query (using the '?' bindvar) and
// a list of names.
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
names = make([]string, 0, 10)
rebound := make([]byte, 0, len(qs))
inName := false
last := len(qs) - 1
currentVar := 1
name := make([]byte, 0, 10)
for i, b := range qs {
// a ':' while we're in a name is an error
if b == ':' {
// if this is the second ':' in a '::' escape sequence, append a ':'
if inName && i > 0 && qs[i-1] == ':' {
rebound = append(rebound, ':')
inName = false
continue
} else if inName {
err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
return query, names, err
}
inName = true
name = []byte{}
// if we're in a name, and this is an allowed character, continue
} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {
// append the byte to the name if we are in a name and not on the last byte
name = append(name, b)
// if we're in a name and it's not an allowed character, the name is done
} else if inName {
inName = false
// if this is the final byte of the string and it is part of the name, then
// make sure to add it to the name
if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
name = append(name, b)
}
// add the string representation to the names list
names = append(names, string(name))
// add a proper bindvar for the bindType
switch bindType {
// oracle only supports named type bind vars even for positional
case NAMED:
rebound = append(rebound, ':')
rebound = append(rebound, name...)
case QUESTION, UNKNOWN:
rebound = append(rebound, '?')
case DOLLAR:
rebound = append(rebound, '$')
for _, b := range strconv.Itoa(currentVar) {
rebound = append(rebound, byte(b))
}
currentVar++
}
// add the terminating byte back to the query, unless it was the final byte and part of the name
if i != last {
rebound = append(rebound, b)
} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
rebound = append(rebound, b)
}
} else {
// this is a normal byte and should just go onto the rebound query
rebound = append(rebound, b)
}
}
return string(rebound), names, err
}
// BindNamed binds a struct or a map to a query with named parameters.
// DEPRECATED: use `sqlx.Named` instead of this; it may be removed in the future.
func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(bindType, query, arg, mapper())
}
// Named takes a query using named parameters and an argument and
// returns a new query with a list of args that can be executed by
// a database. The return value uses the `?` bindvar.
func Named(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(QUESTION, query, arg, mapper())
}
func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMap(bindType, query, maparg)
}
return bindStruct(bindType, query, arg, m)
}
// NamedQuery binds a named query and then runs Query on the result using the
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
// map[string]interface{} types.
func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Queryx(q, args...)
}
// NamedExec uses BindStruct to get a query executable by the driver and
// then runs Exec on the result. Returns an error from the binding
// or the query execution itself.
func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Exec(q, args...)
}
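A short, self-contained sketch of `Named`, which only rewrites the query string and builds the argument list, so it needs no database connection; the `person` columns are just an example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
)

func main() {
	// Named rewrites :name bindvars to '?' and extracts matching args.
	q, args, err := sqlx.Named(
		`INSERT INTO person (first_name, last_name) VALUES (:first, :last)`,
		map[string]interface{}{"first": "Bin", "last": "Smuth"},
	)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(q)    // INSERT INTO person (first_name, last_name) VALUES (?, ?)
	fmt.Println(args) // [Bin Smuth]
}
```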

227
vendor/github.com/jmoiron/sqlx/named_test.go generated vendored Normal file

@@ -0,0 +1,227 @@
package sqlx
import (
"database/sql"
"testing"
)
func TestCompileQuery(t *testing.T) {
table := []struct {
Q, R, D, N string
V []string
}{
// basic test for named parameters, invalid char ',' terminating
{
Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
V: []string{"name", "age", "first", "last"},
},
// This query tests a named parameter ending the string as well as numbers
{
Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
V: []string{"name1", "name2"},
},
{
Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
V: []string{"name1", "name2"},
},
{
Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
V: []string{"first_name", "last_name"},
},
/* This unicode awareness test sadly fails, because of our byte-wise worldview.
* We could certainly iterate by Rune instead, though it's a great deal slower,
* it's probably the RightWay(tm)
{
Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
N: []string{"name", "age", "first", "last"},
},
*/
}
for _, test := range table {
qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
if err != nil {
t.Error(err)
}
if qr != test.R {
t.Errorf("expected %s, got %s", test.R, qr)
}
if len(names) != len(test.V) {
t.Errorf("expected %#v, got %#v", test.V, names)
} else {
for i, name := range names {
if name != test.V[i] {
t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
}
}
}
qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
if qd != test.D {
t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
}
qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
if qq != test.N {
t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
}
}
}
type Test struct {
t *testing.T
}
func (t Test) Error(err error, msg ...interface{}) {
if err != nil {
if len(msg) == 0 {
t.t.Error(err)
} else {
t.t.Error(msg...)
}
}
}
func (t Test) Errorf(err error, format string, args ...interface{}) {
if err != nil {
t.t.Errorf(format, args...)
}
}
func TestNamedQueries(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
test := Test{t}
var ns *NamedStmt
var err error
// Check that invalid preparations fail
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
ns, err = db.PrepareNamed("invalid sql")
if err == nil {
t.Error("Expected an error with invalid prepared statement.")
}
// Check closing works as anticipated
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
test.Error(err)
err = ns.Close()
test.Error(err)
ns, err = db.PrepareNamed(`
SELECT first_name, last_name, email
FROM person WHERE first_name=:first_name AND email=:email`)
test.Error(err)
// test Queryx, which uses Query under the hood
p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
rows, err := ns.Queryx(p)
test.Error(err)
for rows.Next() {
var p2 Person
rows.StructScan(&p2)
if p.FirstName != p2.FirstName {
t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
}
if p.LastName != p2.LastName {
t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
}
if p.Email != p2.Email {
t.Errorf("got %s, expected %s", p.Email, p2.Email)
}
}
// test Select
people := make([]Person, 0, 5)
err = ns.Select(&people, p)
test.Error(err)
if len(people) != 1 {
t.Errorf("got %d results, expected %d", len(people), 1)
}
if p.FirstName != people[0].FirstName {
t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
}
if p.LastName != people[0].LastName {
t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
}
if p.Email != people[0].Email {
t.Errorf("got %s, expected %s", p.Email, people[0].Email)
}
// test Exec
ns, err = db.PrepareNamed(`
INSERT INTO person (first_name, last_name, email)
VALUES (:first_name, :last_name, :email)`)
test.Error(err)
js := Person{
FirstName: "Julien",
LastName: "Savea",
Email: "jsavea@ab.co.nz",
}
_, err = ns.Exec(js)
test.Error(err)
// Make sure we can pull him out again
p2 := Person{}
db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
if p2.Email != js.Email {
t.Errorf("expected %s, got %s", js.Email, p2.Email)
}
// test Txn NamedStmts
tx := db.MustBegin()
txns := tx.NamedStmt(ns)
// We're going to add Steven in this txn
sl := Person{
FirstName: "Steven",
LastName: "Luatua",
Email: "sluatua@ab.co.nz",
}
_, err = txns.Exec(sl)
test.Error(err)
// then rollback...
tx.Rollback()
// looking for Steven after a rollback should fail
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
if err != sql.ErrNoRows {
t.Errorf("expected no rows error, got %v", err)
}
// now do the same, but commit
tx = db.MustBegin()
txns = tx.NamedStmt(ns)
_, err = txns.Exec(sl)
test.Error(err)
tx.Commit()
// looking for Steven after a Commit should succeed
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
test.Error(err)
if p2.Email != sl.Email {
t.Errorf("expected %s, got %s", sl.Email, p2.Email)
}
})
}

17
vendor/github.com/jmoiron/sqlx/reflectx/README.md generated vendored Normal file

@@ -0,0 +1,17 @@
# reflectx
The sqlx package has special reflect needs. In particular, it needs to:
* be able to map a name to a field
* understand embedded structs
* understand mapping names to fields by a particular tag
* support user-specified name -> field mapping functions
These behaviors mimic the behaviors of the standard library marshalers and also the
behavior of standard Go accessors.
The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
tags in the ways that are vital to most marshalers, and they are slow.
This reflectx package extends reflect to achieve these goals.
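As a small illustration of the points above, the sketch below uses `NewMapperFunc` from this package; the `Person` struct and its `db` tag are invented for the example.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Person struct {
	FirstName string `db:"first_name"`
	Email     string
}

func main() {
	// Obey the `db` tag when present, otherwise lowercase the field name.
	m := reflectx.NewMapperFunc("db", strings.ToLower)

	p := Person{FirstName: "Jane", Email: "jane@example.com"}
	v := m.FieldByName(reflect.ValueOf(p), "first_name")
	fmt.Println(v.Interface()) // Jane

	fi := m.TypeMap(reflect.TypeOf(p)).GetByPath("email")
	fmt.Println(fi.Index) // [1]
}
```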

371
vendor/github.com/jmoiron/sqlx/reflectx/reflect.go generated vendored Normal file

@@ -0,0 +1,371 @@
// Package reflectx implements extensions to the standard reflect lib suitable
// for implementing marshaling and unmarshaling packages. The main Mapper type
// allows for Go-compatible named attribute access, including accessing embedded
// struct attributes and the ability to use functions and struct tags to
// customize field names.
//
package reflectx
import (
"fmt"
"reflect"
"runtime"
"strings"
"sync"
)
// A FieldInfo is a collection of metadata about a struct field.
type FieldInfo struct {
Index []int
Path string
Field reflect.StructField
Zero reflect.Value
Name string
Options map[string]string
Embedded bool
Children []*FieldInfo
Parent *FieldInfo
}
// A StructMap is an index of field metadata for a struct.
type StructMap struct {
Tree *FieldInfo
Index []*FieldInfo
Paths map[string]*FieldInfo
Names map[string]*FieldInfo
}
// GetByPath returns a *FieldInfo for a given string path.
func (f StructMap) GetByPath(path string) *FieldInfo {
return f.Paths[path]
}
// GetByTraversal returns a *FieldInfo for a given integer path. It is
// analogous to reflect.FieldByIndex.
func (f StructMap) GetByTraversal(index []int) *FieldInfo {
if len(index) == 0 {
return nil
}
tree := f.Tree
for _, i := range index {
if i >= len(tree.Children) || tree.Children[i] == nil {
return nil
}
tree = tree.Children[i]
}
return tree
}
// Mapper is a general purpose mapper of names to struct fields. A Mapper
// behaves like most marshallers, optionally obeying a field tag for name
// mapping and a function to provide a basic mapping of fields to names.
type Mapper struct {
cache map[reflect.Type]*StructMap
tagName string
tagMapFunc func(string) string
mapFunc func(string) string
mutex sync.Mutex
}
// NewMapper returns a new mapper which optionally obeys the field tag given
// by tagName. If tagName is the empty string, it is ignored.
func NewMapper(tagName string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
}
}
// NewMapperTagFunc returns a new mapper which contains a mapper for field names
// AND a mapper for tag values. This is useful for tags like json which can
// have values like "name,omitempty".
func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: mapFunc,
tagMapFunc: tagMapFunc,
}
}
// NewMapperFunc returns a new mapper which optionally obeys a field tag and
// a struct field name mapper func given by f. Tags will take precedence, but
// for any other field, the mapped name will be f(field.Name)
func NewMapperFunc(tagName string, f func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: f,
}
}
// TypeMap returns a mapping of field strings to int slices representing
// the traversal down the struct to reach the field.
func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
m.mutex.Lock()
mapping, ok := m.cache[t]
if !ok {
mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
m.cache[t] = mapping
}
m.mutex.Unlock()
return mapping
}
// FieldMap returns the mapper's mapping of field names to reflect values. Panics
// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
r := map[string]reflect.Value{}
tm := m.TypeMap(v.Type())
for tagName, fi := range tm.Names {
r[tagName] = FieldByIndexes(v, fi.Index)
}
return r
}
// FieldByName returns a field by its mapped name as a reflect.Value.
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
// Returns zero Value if the name is not found.
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
fi, ok := tm.Names[name]
if !ok {
return v
}
return FieldByIndexes(v, fi.Index)
}
// FieldsByName returns a slice of values corresponding to the slice of names
// for the value. Panics if v's Kind is not Struct or v is not Indirectable
// to a struct Kind. Returns zero Value for each name not found.
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
vals := make([]reflect.Value, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
vals = append(vals, *new(reflect.Value))
} else {
vals = append(vals, FieldByIndexes(v, fi.Index))
}
}
return vals
}
// TraversalsByName returns a slice of int slices which represent the struct
// traversals for each mapped name. Panics if t is not a struct or Indirectable
// to a struct. Returns empty int slice for each name not found.
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
t = Deref(t)
mustBe(t, reflect.Struct)
tm := m.TypeMap(t)
r := make([][]int, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
r = append(r, []int{})
} else {
r = append(r, fi.Index)
}
}
return r
}
// FieldByIndexes returns a value for a particular struct traversal.
func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
// if this is a pointer, it's possible it is nil
if v.Kind() == reflect.Ptr && v.IsNil() {
alloc := reflect.New(Deref(v.Type()))
v.Set(alloc)
}
if v.Kind() == reflect.Map && v.IsNil() {
v.Set(reflect.MakeMap(v.Type()))
}
}
return v
}
// FieldByIndexesReadOnly returns a value for a particular struct traversal,
// but is not concerned with allocating nil pointers because the value is
// going to be used for reading and not setting.
func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
}
return v
}
// Deref is Indirect for reflect.Types
func Deref(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t
}
// -- helpers & utilities --
type kinder interface {
Kind() reflect.Kind
}
// mustBe checks a value against a kind, panicking with a reflect.ValueError
// if the kind isn't that which is required.
func mustBe(v kinder, expected reflect.Kind) {
k := v.Kind()
if k != expected {
panic(&reflect.ValueError{Method: methodName(), Kind: k})
}
}
// methodName returns the name of the caller of the function calling methodName
func methodName() string {
pc, _, _, _ := runtime.Caller(2)
f := runtime.FuncForPC(pc)
if f == nil {
return "unknown method"
}
return f.Name()
}
type typeQueue struct {
t reflect.Type
fi *FieldInfo
pp string // Parent path
}
// A copying append that creates a new slice each time.
func apnd(is []int, i int) []int {
x := make([]int, len(is)+1)
for p, n := range is {
x[p] = n
}
x[len(x)-1] = i
return x
}
// getMapping returns a mapping for the t type, using the tagName, mapFunc and
// tagMapFunc to determine the canonical names of fields.
func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc func(string) string) *StructMap {
m := []*FieldInfo{}
root := &FieldInfo{}
queue := []typeQueue{}
queue = append(queue, typeQueue{Deref(t), root, ""})
for len(queue) != 0 {
// pop the first item off of the queue
tq := queue[0]
queue = queue[1:]
nChildren := 0
if tq.t.Kind() == reflect.Struct {
nChildren = tq.t.NumField()
}
tq.fi.Children = make([]*FieldInfo, nChildren)
// iterate through all of its fields
for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
f := tq.t.Field(fieldPos)
fi := FieldInfo{}
fi.Field = f
fi.Zero = reflect.New(f.Type).Elem()
fi.Options = map[string]string{}
var tag, name string
if tagName != "" && strings.Contains(string(f.Tag), tagName+":") {
tag = f.Tag.Get(tagName)
name = tag
} else {
if mapFunc != nil {
name = mapFunc(f.Name)
}
}
parts := strings.Split(name, ",")
if len(parts) > 1 {
name = parts[0]
for _, opt := range parts[1:] {
kv := strings.Split(opt, "=")
if len(kv) > 1 {
fi.Options[kv[0]] = kv[1]
} else {
fi.Options[kv[0]] = ""
}
}
}
if tagMapFunc != nil {
tag = tagMapFunc(tag)
}
fi.Name = name
if tq.pp == "" || (tq.pp == "" && tag == "") {
fi.Path = fi.Name
} else {
fi.Path = fmt.Sprintf("%s.%s", tq.pp, fi.Name)
}
// if the name is "-", disabled via a tag, skip it
if name == "-" {
continue
}
// skip unexported fields
if len(f.PkgPath) != 0 && !f.Anonymous {
continue
}
// bfs search of anonymous embedded structs
if f.Anonymous {
pp := tq.pp
if tag != "" {
pp = fi.Path
}
fi.Embedded = true
fi.Index = apnd(tq.fi.Index, fieldPos)
nChildren := 0
ft := Deref(f.Type)
if ft.Kind() == reflect.Struct {
nChildren = ft.NumField()
}
fi.Children = make([]*FieldInfo, nChildren)
queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
} else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
}
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Parent = tq.fi
tq.fi.Children[fieldPos] = &fi
m = append(m, &fi)
}
}
flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
for _, fi := range flds.Index {
flds.Paths[fi.Path] = fi
if fi.Name != "" && !fi.Embedded {
flds.Names[fi.Path] = fi
}
}
return flds
}
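To show how the pieces above fit together, here is a rough sketch that resolves mapped names to index paths with `TraversalsByName` and reads the values back with `FieldByIndexesReadOnly`, roughly the way sqlx's `bindArgs` does; the `User`/`Address` structs are invented for the example.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Address struct {
	City string `db:"city"`
}

type User struct {
	Name    string  `db:"name"`
	Address Address `db:"address"`
}

func main() {
	m := reflectx.NewMapperFunc("db", strings.ToLower)
	u := User{Name: "Ada", Address: Address{City: "London"}}

	// Resolve mapped names to index paths once, then read fields by index.
	traversals := m.TraversalsByName(reflect.TypeOf(u), []string{"name", "address.city"})
	for _, t := range traversals {
		v := reflectx.FieldByIndexesReadOnly(reflect.ValueOf(u), t)
		fmt.Println(v.Interface()) // Ada, then London
	}
}
```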

896
vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go generated vendored Normal file

@@ -0,0 +1,896 @@
package reflectx
import (
"reflect"
"strings"
"testing"
)
func ival(v reflect.Value) int {
return v.Interface().(int)
}
func TestBasic(t *testing.T) {
type Foo struct {
A int
B int
C int
}
f := Foo{1, 2, 3}
fv := reflect.ValueOf(f)
m := NewMapperFunc("", func(s string) string { return s })
v := m.FieldByName(fv, "A")
if ival(v) != f.A {
t.Errorf("Expecting %d, got %d", ival(v), f.A)
}
v = m.FieldByName(fv, "B")
if ival(v) != f.B {
t.Errorf("Expecting %d, got %d", f.B, ival(v))
}
v = m.FieldByName(fv, "C")
if ival(v) != f.C {
t.Errorf("Expecting %d, got %d", f.C, ival(v))
}
}
func TestBasicEmbedded(t *testing.T) {
type Foo struct {
A int
}
type Bar struct {
Foo // `db:""` is implied for an embedded struct
B int
C int `db:"-"`
}
type Baz struct {
A int
Bar `db:"Bar"`
}
m := NewMapperFunc("db", func(s string) string { return s })
z := Baz{}
z.A = 1
z.B = 2
z.C = 4
z.Bar.Foo.A = 3
zv := reflect.ValueOf(z)
fields := m.TypeMap(reflect.TypeOf(z))
if len(fields.Index) != 5 {
t.Errorf("Expecting 5 fields")
}
// for _, fi := range fields.Index {
// log.Println(fi)
// }
v := m.FieldByName(zv, "A")
if ival(v) != z.A {
t.Errorf("Expecting %d, got %d", z.A, ival(v))
}
v = m.FieldByName(zv, "Bar.B")
if ival(v) != z.Bar.B {
t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v))
}
v = m.FieldByName(zv, "Bar.A")
if ival(v) != z.Bar.Foo.A {
t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
}
v = m.FieldByName(zv, "Bar.C")
if _, ok := v.Interface().(int); ok {
t.Errorf("Expecting Bar.C to not exist")
}
fi := fields.GetByPath("Bar.C")
if fi != nil {
t.Errorf("Bar.C should not exist")
}
}
func TestEmbeddedSimple(t *testing.T) {
type UUID [16]byte
type MyID struct {
UUID
}
type Item struct {
ID MyID
}
z := Item{}
m := NewMapper("db")
m.TypeMap(reflect.TypeOf(z))
}
func TestBasicEmbeddedWithTags(t *testing.T) {
type Foo struct {
A int `db:"a"`
}
type Bar struct {
Foo // `db:""` is implied for an embedded struct
B int `db:"b"`
}
type Baz struct {
A int `db:"a"`
Bar // `db:""` is implied for an embedded struct
}
m := NewMapper("db")
z := Baz{}
z.A = 1
z.B = 2
z.Bar.Foo.A = 3
zv := reflect.ValueOf(z)
fields := m.TypeMap(reflect.TypeOf(z))
if len(fields.Index) != 5 {
t.Errorf("Expecting 5 fields")
}
// for _, fi := range fields.index {
// log.Println(fi)
// }
v := m.FieldByName(zv, "a")
if ival(v) != z.Bar.Foo.A { // the dominant field
t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
}
v = m.FieldByName(zv, "b")
if ival(v) != z.B {
t.Errorf("Expecting %d, got %d", z.B, ival(v))
}
}
func TestFlatTags(t *testing.T) {
m := NewMapper("db")
type Asset struct {
Title string `db:"title"`
}
type Post struct {
Author string `db:"author,required"`
Asset Asset `db:""`
}
// Post columns: (author title)
post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}}
pv := reflect.ValueOf(post)
v := m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
v = m.FieldByName(pv, "title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
}
func TestNestedStruct(t *testing.T) {
m := NewMapper("db")
type Details struct {
Active bool `db:"active"`
}
type Asset struct {
Title string `db:"title"`
Details Details `db:"details"`
}
type Post struct {
Author string `db:"author,required"`
Asset `db:"asset"`
}
// Post columns: (author asset.title asset.details.active)
post := Post{
Author: "Joe",
Asset: Asset{Title: "Hello", Details: Details{Active: true}},
}
pv := reflect.ValueOf(post)
v := m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
v = m.FieldByName(pv, "title")
if _, ok := v.Interface().(string); ok {
t.Errorf("Expecting field to not exist")
}
v = m.FieldByName(pv, "asset.title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset.details.active")
if v.Interface().(bool) != post.Asset.Details.Active {
t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool))
}
}
func TestInlineStruct(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type Employee struct {
Name string
ID int
}
type Boss Employee
type person struct {
Employee `db:"employee"`
Boss `db:"boss"`
}
// employees columns: (employee.name employee.id boss.name boss.id)
em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}}
ev := reflect.ValueOf(em)
fields := m.TypeMap(reflect.TypeOf(em))
if len(fields.Index) != 6 {
t.Errorf("Expecting 6 fields")
}
v := m.FieldByName(ev, "employee.name")
if v.Interface().(string) != em.Employee.Name {
t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string))
}
v = m.FieldByName(ev, "boss.id")
if ival(v) != em.Boss.ID {
t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v))
}
}
func TestFieldsEmbedded(t *testing.T) {
m := NewMapper("db")
type Person struct {
Name string `db:"name,size=64"`
}
type Place struct {
Name string `db:"name"`
}
type Article struct {
Title string `db:"title"`
}
type PP struct {
Person `db:"person,required"`
Place `db:",someflag"`
Article `db:",required"`
}
// PP columns: (person.name name title)
pp := PP{}
pp.Person.Name = "Peter"
pp.Place.Name = "Toronto"
pp.Article.Title = "Best city ever"
fields := m.TypeMap(reflect.TypeOf(pp))
// for i, f := range fields {
// log.Println(i, f)
// }
ppv := reflect.ValueOf(pp)
v := m.FieldByName(ppv, "person.name")
if v.Interface().(string) != pp.Person.Name {
t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string))
}
v = m.FieldByName(ppv, "name")
if v.Interface().(string) != pp.Place.Name {
t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string))
}
v = m.FieldByName(ppv, "title")
if v.Interface().(string) != pp.Article.Title {
t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string))
}
fi := fields.GetByPath("person")
if _, ok := fi.Options["required"]; !ok {
t.Errorf("Expecting required option to be set")
}
if !fi.Embedded {
t.Errorf("Expecting field to be embedded")
}
if len(fi.Index) != 1 || fi.Index[0] != 0 {
t.Errorf("Expecting index to be [0]")
}
fi = fields.GetByPath("person.name")
if fi == nil {
t.Errorf("Expecting person.name to exist")
}
if fi.Path != "person.name" {
t.Errorf("Expecting %s, got %s", "person.name", fi.Path)
}
if fi.Options["size"] != "64" {
t.Errorf("Expecting %s, got %s", "64", fi.Options["size"])
}
fi = fields.GetByTraversal([]int{1, 0})
if fi == nil {
t.Errorf("Expecting traveral to exist")
}
if fi.Path != "name" {
t.Errorf("Expecting %s, got %s", "name", fi.Path)
}
fi = fields.GetByTraversal([]int{2})
if fi == nil {
t.Errorf("Expecting traversal to exist")
}
if _, ok := fi.Options["required"]; !ok {
t.Errorf("Expecting required option to be set")
}
trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"})
if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) {
t.Errorf("Expecting traversal: %v", trs)
}
}
func TestPtrFields(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type Asset struct {
Title string
}
type Post struct {
*Asset `db:"asset"`
Author string
}
post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}}
pv := reflect.ValueOf(post)
fields := m.TypeMap(reflect.TypeOf(post))
if len(fields.Index) != 3 {
t.Errorf("Expecting 3 fields")
}
v := m.FieldByName(pv, "asset.title")
if v.Interface().(string) != post.Asset.Title {
t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
}
func TestNamedPtrFields(t *testing.T) {
m := NewMapperTagFunc("db", strings.ToLower, nil)
type User struct {
Name string
}
type Asset struct {
Title string
Owner *User `db:"owner"`
}
type Post struct {
Author string
Asset1 *Asset `db:"asset1"`
Asset2 *Asset `db:"asset2"`
}
post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil
pv := reflect.ValueOf(post)
fields := m.TypeMap(reflect.TypeOf(post))
if len(fields.Index) != 9 {
t.Errorf("Expecting 9 fields")
}
v := m.FieldByName(pv, "asset1.title")
if v.Interface().(string) != post.Asset1.Title {
t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset1.owner.name")
if v.Interface().(string) != post.Asset1.Owner.Name {
t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string))
}
v = m.FieldByName(pv, "asset2.title")
if v.Interface().(string) != post.Asset2.Title {
t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string))
}
v = m.FieldByName(pv, "asset2.owner.name")
if v.Interface().(string) != post.Asset2.Owner.Name {
t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string))
}
v = m.FieldByName(pv, "author")
if v.Interface().(string) != post.Author {
t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
}
}
func TestFieldMap(t *testing.T) {
type Foo struct {
A int
B int
C int
}
f := Foo{1, 2, 3}
m := NewMapperFunc("db", strings.ToLower)
fm := m.FieldMap(reflect.ValueOf(f))
if len(fm) != 3 {
t.Errorf("Expecting %d keys, got %d", 3, len(fm))
}
if fm["a"].Interface().(int) != 1 {
t.Errorf("Expecting %d, got %d", 1, ival(fm["a"]))
}
if fm["b"].Interface().(int) != 2 {
t.Errorf("Expecting %d, got %d", 2, ival(fm["b"]))
}
if fm["c"].Interface().(int) != 3 {
t.Errorf("Expecting %d, got %d", 3, ival(fm["c"]))
}
}
func TestTagNameMapping(t *testing.T) {
type Strategy struct {
StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"`
StrategyName string
}
m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string {
if strings.Contains(value, ",") {
return strings.Split(value, ",")[0]
}
return value
})
strategy := Strategy{"1", "Alpah"}
mapping := m.TypeMap(reflect.TypeOf(strategy))
for _, key := range []string{"strategy_id", "STRATEGYNAME"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
}
func TestMapping(t *testing.T) {
type Person struct {
ID int
Name string
WearsGlasses bool `db:"wears_glasses"`
}
m := NewMapperFunc("db", strings.ToLower)
p := Person{1, "Jason", true}
mapping := m.TypeMap(reflect.TypeOf(p))
for _, key := range []string{"id", "name", "wears_glasses"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
type SportsPerson struct {
Weight int
Age int
Person
}
s := SportsPerson{Weight: 100, Age: 30, Person: p}
mapping = m.TypeMap(reflect.TypeOf(s))
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
type RugbyPlayer struct {
Position int
IsIntense bool `db:"is_intense"`
IsAllBlack bool `db:"-"`
SportsPerson
}
r := RugbyPlayer{12, true, false, s}
mapping = m.TypeMap(reflect.TypeOf(r))
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
if fi := mapping.GetByPath(key); fi == nil {
t.Errorf("Expecting to find key %s in mapping but did not.", key)
}
}
if fi := mapping.GetByPath("isallblack"); fi != nil {
t.Errorf("Expecting to ignore `IsAllBlack` field")
}
}
func TestGetByTraversal(t *testing.T) {
type C struct {
C0 int
C1 int
}
type B struct {
B0 string
B1 *C
}
type A struct {
A0 int
A1 B
}
testCases := []struct {
Index []int
ExpectedName string
ExpectNil bool
}{
{
Index: []int{0},
ExpectedName: "A0",
},
{
Index: []int{1, 0},
ExpectedName: "B0",
},
{
Index: []int{1, 1, 1},
ExpectedName: "C1",
},
{
Index: []int{3, 4, 5},
ExpectNil: true,
},
{
Index: []int{},
ExpectNil: true,
},
{
Index: nil,
ExpectNil: true,
},
}
m := NewMapperFunc("db", func(n string) string { return n })
tm := m.TypeMap(reflect.TypeOf(A{}))
for i, tc := range testCases {
fi := tm.GetByTraversal(tc.Index)
if tc.ExpectNil {
if fi != nil {
t.Errorf("%d: expected nil, got %v", i, fi)
}
continue
}
if fi == nil {
t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName)
continue
}
if fi.Name != tc.ExpectedName {
t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name)
}
}
}
// TestMapperMethodsByName tests the Mapper methods FieldsByName and TraversalsByName
func TestMapperMethodsByName(t *testing.T) {
type C struct {
C0 string
C1 int
}
type B struct {
B0 *C `db:"B0"`
B1 C `db:"B1"`
B2 string `db:"B2"`
}
type A struct {
A0 *B `db:"A0"`
B `db:"A1"`
A2 int
a3 int
}
val := &A{
A0: &B{
B0: &C{C0: "0", C1: 1},
B1: C{C0: "2", C1: 3},
B2: "4",
},
B: B{
B0: nil,
B1: C{C0: "5", C1: 6},
B2: "7",
},
A2: 8,
}
testCases := []struct {
Name string
ExpectInvalid bool
ExpectedValue interface{}
ExpectedIndexes []int
}{
{
Name: "A0.B0.C0",
ExpectedValue: "0",
ExpectedIndexes: []int{0, 0, 0},
},
{
Name: "A0.B0.C1",
ExpectedValue: 1,
ExpectedIndexes: []int{0, 0, 1},
},
{
Name: "A0.B1.C0",
ExpectedValue: "2",
ExpectedIndexes: []int{0, 1, 0},
},
{
Name: "A0.B1.C1",
ExpectedValue: 3,
ExpectedIndexes: []int{0, 1, 1},
},
{
Name: "A0.B2",
ExpectedValue: "4",
ExpectedIndexes: []int{0, 2},
},
{
Name: "A1.B0.C0",
ExpectedValue: "",
ExpectedIndexes: []int{1, 0, 0},
},
{
Name: "A1.B0.C1",
ExpectedValue: 0,
ExpectedIndexes: []int{1, 0, 1},
},
{
Name: "A1.B1.C0",
ExpectedValue: "5",
ExpectedIndexes: []int{1, 1, 0},
},
{
Name: "A1.B1.C1",
ExpectedValue: 6,
ExpectedIndexes: []int{1, 1, 1},
},
{
Name: "A1.B2",
ExpectedValue: "7",
ExpectedIndexes: []int{1, 2},
},
{
Name: "A2",
ExpectedValue: 8,
ExpectedIndexes: []int{2},
},
{
Name: "XYZ",
ExpectInvalid: true,
ExpectedIndexes: []int{},
},
{
Name: "a3",
ExpectInvalid: true,
ExpectedIndexes: []int{},
},
}
// build the names array from the test cases
names := make([]string, len(testCases))
for i, tc := range testCases {
names[i] = tc.Name
}
m := NewMapperFunc("db", func(n string) string { return n })
v := reflect.ValueOf(val)
values := m.FieldsByName(v, names)
if len(values) != len(testCases) {
t.Errorf("expected %d values, got %d", len(testCases), len(values))
t.FailNow()
}
indexes := m.TraversalsByName(v.Type(), names)
if len(indexes) != len(testCases) {
t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes))
t.FailNow()
}
for i, val := range values {
tc := testCases[i]
traversal := indexes[i]
if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) {
t.Errorf("%d: expected %v, got %v", tc.ExpectedIndexes, traversal)
t.FailNow()
}
val = reflect.Indirect(val)
if tc.ExpectInvalid {
if val.IsValid() {
t.Errorf("%d: expected zero value, got %v", i, val)
}
continue
}
if !val.IsValid() {
t.Errorf("%d: expected valid value, got %v", i, val)
continue
}
actualValue := reflect.Indirect(val).Interface()
if !reflect.DeepEqual(tc.ExpectedValue, actualValue) {
t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue)
}
}
}
func TestFieldByIndexes(t *testing.T) {
type C struct {
C0 bool
C1 string
C2 int
C3 map[string]int
}
type B struct {
B1 C
B2 *C
}
type A struct {
A1 B
A2 *B
}
testCases := []struct {
value interface{}
indexes []int
expectedValue interface{}
readOnly bool
}{
{
value: A{
A1: B{B1: C{C0: true}},
},
indexes: []int{0, 0, 0},
expectedValue: true,
readOnly: true,
},
{
value: A{
A2: &B{B2: &C{C1: "answer"}},
},
indexes: []int{1, 1, 1},
expectedValue: "answer",
readOnly: true,
},
{
value: &A{},
indexes: []int{1, 1, 3},
expectedValue: map[string]int{},
},
}
for i, tc := range testCases {
checkResults := func(v reflect.Value) {
if tc.expectedValue == nil {
if !v.IsNil() {
t.Errorf("%d: expected nil, actual %v", i, v.Interface())
}
} else {
if !reflect.DeepEqual(tc.expectedValue, v.Interface()) {
t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface())
}
}
}
checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes))
if tc.readOnly {
checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes))
}
}
}
func TestMustBe(t *testing.T) {
typ := reflect.TypeOf(E1{})
mustBe(typ, reflect.Struct)
defer func() {
if r := recover(); r != nil {
valueErr, ok := r.(*reflect.ValueError)
if !ok {
t.Errorf("expected panic with *reflect.ValueError, got %T: %v", r, r)
return
}
if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" {
t.Errorf("unexpected Method: %s", valueErr.Method)
}
if valueErr.Kind != reflect.String {
t.Errorf("unexpected Kind: %s", valueErr.Kind)
}
} else {
t.Error("expected panic")
}
}()
typ = reflect.TypeOf("string")
mustBe(typ, reflect.Struct)
t.Error("got here, didn't expect to")
}
type E1 struct {
A int
}
type E2 struct {
E1
B int
}
type E3 struct {
E2
C int
}
type E4 struct {
E3
D int
}
func BenchmarkFieldNameL1(b *testing.B) {
e4 := E4{D: 1}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.FieldByName("D")
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldNameL4(b *testing.B) {
e4 := E4{}
e4.A = 1
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.FieldByName("A")
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldPosL1(b *testing.B) {
e4 := E4{D: 1}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.Field(1)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldPosL4(b *testing.B) {
e4 := E4{}
e4.A = 1
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := v.Field(0)
f = f.Field(0)
f = f.Field(0)
f = f.Field(0)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}
func BenchmarkFieldByIndexL4(b *testing.B) {
e4 := E4{}
e4.A = 1
idx := []int{0, 0, 0, 0}
for i := 0; i < b.N; i++ {
v := reflect.ValueOf(e4)
f := FieldByIndexes(v, idx)
if f.Interface().(int) != 1 {
b.Fatal("Wrong value.")
}
}
}

992
vendor/github.com/jmoiron/sqlx/sqlx.go generated vendored Normal file
View File

@ -0,0 +1,992 @@
package sqlx
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Although the NameMapper is convenient, in practice it should not
// be relied on except for application code. If you are writing a library
// that uses sqlx, you should be aware that the name mappings you expect
// can be overridden by your user's application.
// NameMapper is used to map column names to struct field names. By default,
// it uses strings.ToLower to lowercase struct field names. It can be set
// to whatever you want, but it is encouraged to be set before sqlx is used
// as name-to-field mappings are cached after first use on a type.
var NameMapper = strings.ToLower
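// As an illustrative sketch (not part of this file), an application that
// prefers its own column naming could install a custom mapping before the
// first query runs, since name-to-field mappings are cached per type:
//
//	func init() {
//		// toSnakeCase is a hypothetical helper owned by the application.
//		sqlx.NameMapper = toSnakeCase
//	}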
var origMapper = reflect.ValueOf(NameMapper)
// Rather than creating on init, this is created when necessary so that
// importers have time to customize the NameMapper.
var mpr *reflectx.Mapper
// mapper returns a valid mapper using the configured NameMapper func.
func mapper() *reflectx.Mapper {
if mpr == nil {
mpr = reflectx.NewMapperFunc("db", NameMapper)
} else if origMapper != reflect.ValueOf(NameMapper) {
// if NameMapper has changed, create a new mapper
mpr = reflectx.NewMapperFunc("db", NameMapper)
origMapper = reflect.ValueOf(NameMapper)
}
return mpr
}
// isScannable takes the reflect.Type and returns whether or not it's
// Scannable. Something is scannable if:
// * it is not a struct
// * it implements sql.Scanner
// * it has no exported fields
func isScannable(t reflect.Type) bool {
if reflect.PtrTo(t).Implements(_scannerInterface) {
return true
}
if t.Kind() != reflect.Struct {
return true
}
// it's not important that we use the right mapper for this particular object,
// we're only concerned on how many exported fields this struct has
m := mapper()
if len(m.TypeMap(t).Index) == 0 {
return true
}
return false
}
// ColScanner is an interface used by MapScan and SliceScan
type ColScanner interface {
Columns() ([]string, error)
Scan(dest ...interface{}) error
Err() error
}
// Queryer is an interface used by Get and Select
type Queryer interface {
Query(query string, args ...interface{}) (*sql.Rows, error)
Queryx(query string, args ...interface{}) (*Rows, error)
QueryRowx(query string, args ...interface{}) *Row
}
// Execer is an interface used by MustExec and LoadFile
type Execer interface {
Exec(query string, args ...interface{}) (sql.Result, error)
}
// Binder is an interface for something which can bind queries (Tx, DB)
type binder interface {
DriverName() string
Rebind(string) string
BindNamed(string, interface{}) (string, []interface{}, error)
}
// Ext is a union interface which can bind, query, and exec, used by
// NamedQuery and NamedExec.
type Ext interface {
binder
Queryer
Execer
}
// Preparer is an interface used by Preparex.
type Preparer interface {
Prepare(query string) (*sql.Stmt, error)
}
// determine if any of our extensions are unsafe
func isUnsafe(i interface{}) bool {
switch v := i.(type) {
case Row:
return v.unsafe
case *Row:
return v.unsafe
case Rows:
return v.unsafe
case *Rows:
return v.unsafe
case NamedStmt:
return v.Stmt.unsafe
case *NamedStmt:
return v.Stmt.unsafe
case Stmt:
return v.unsafe
case *Stmt:
return v.unsafe
case qStmt:
return v.unsafe
case *qStmt:
return v.unsafe
case DB:
return v.unsafe
case *DB:
return v.unsafe
case Tx:
return v.unsafe
case *Tx:
return v.unsafe
case sql.Rows, *sql.Rows:
return false
default:
return false
}
}
func mapperFor(i interface{}) *reflectx.Mapper {
switch i.(type) {
case DB:
return i.(DB).Mapper
case *DB:
return i.(*DB).Mapper
case Tx:
return i.(Tx).Mapper
case *Tx:
return i.(*Tx).Mapper
default:
return mapper()
}
}
var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
// Row is a reimplementation of sql.Row in order to gain access to the underlying
// sql.Rows.Columns() data, necessary for StructScan.
type Row struct {
err error
unsafe bool
rows *sql.Rows
Mapper *reflectx.Mapper
}
// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
// underlying error from the internal rows object if it exists.
func (r *Row) Scan(dest ...interface{}) error {
if r.err != nil {
return r.err
}
// TODO(bradfitz): for now we need to defensively clone all
// []byte that the driver returned (not permitting
// *RawBytes in Rows.Scan), since we're about to close
// the Rows in our defer, when we return from this function.
// the contract with the driver.Next(...) interface is that it
// can return slices into read-only temporary memory that's
// only valid until the next Scan/Close. But the TODO is that
// for a lot of drivers, this copy will be unnecessary. We
// should provide an optional interface for drivers to
// implement to say, "don't worry, the []bytes that I return
// from Next will not be modified again." (for instance, if
// they were obtained from the network anyway) But for now we
// don't care.
defer r.rows.Close()
for _, dp := range dest {
if _, ok := dp.(*sql.RawBytes); ok {
return errors.New("sql: RawBytes isn't allowed on Row.Scan")
}
}
if !r.rows.Next() {
if err := r.rows.Err(); err != nil {
return err
}
return sql.ErrNoRows
}
err := r.rows.Scan(dest...)
if err != nil {
return err
}
// Make sure the query can be processed to completion with no errors.
if err := r.rows.Close(); err != nil {
return err
}
return nil
}
// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
// returned by Row.Scan()
func (r *Row) Columns() ([]string, error) {
if r.err != nil {
return []string{}, r.err
}
return r.rows.Columns()
}
// Err returns the error encountered while scanning.
func (r *Row) Err() error {
return r.err
}
// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
// used mostly to automatically bind named queries using the right bindvars.
type DB struct {
*sql.DB
driverName string
unsafe bool
Mapper *reflectx.Mapper
}
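// Illustrative sketch only (the "postgres" driver name and the Person struct
// are assumptions, not part of this file): a typical round trip through the
// wrapper looks like
//
//	db := sqlx.MustConnect("postgres", "user=foo dbname=bar sslmode=disable")
//	var p Person
//	err := db.Get(&p, "SELECT * FROM person WHERE id=$1", 1)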
// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
// driverName of the original database is required for named query support.
func NewDb(db *sql.DB, driverName string) *DB {
return &DB{DB: db, driverName: driverName, Mapper: mapper()}
}
// DriverName returns the driverName passed to the Open function for this DB.
func (db *DB) DriverName() string {
return db.driverName
}
// Open is the same as sql.Open, but returns an *sqlx.DB instead.
func Open(driverName, dataSourceName string) (*DB, error) {
db, err := sql.Open(driverName, dataSourceName)
if err != nil {
return nil, err
}
return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
}
// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
func MustOpen(driverName, dataSourceName string) *DB {
db, err := Open(driverName, dataSourceName)
if err != nil {
panic(err)
}
return db
}
// MapperFunc sets a new mapper for this db using the default sqlx struct tag
// and the provided mapper function.
func (db *DB) MapperFunc(mf func(string) string) {
db.Mapper = reflectx.NewMapperFunc("db", mf)
}
// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
func (db *DB) Rebind(query string) string {
return Rebind(BindType(db.driverName), query)
}
// Unsafe returns a version of DB which will silently succeed to scan when
// columns in the SQL result have no fields in the destination struct.
// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
// safety behavior.
func (db *DB) Unsafe() *DB {
return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
}
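// Hedged sketch of the behavior described above (Person is an assumed struct
// with no field for added_column, not part of this file):
//
//	udb := db.Unsafe()
//	var p Person
//	// succeeds even though added_column has no destination field
//	err := udb.Get(&p, "SELECT id, name, added_column FROM person LIMIT 1")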
// BindNamed binds a query using the DB driver's bindvar type.
func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
}
// NamedQuery using this DB.
func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
return NamedQuery(db, query, arg)
}
// NamedExec using this DB.
func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
return NamedExec(db, query, arg)
}
// Select using this DB.
func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
return Select(db, dest, query, args...)
}
// Get using this DB.
func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
return Get(db, dest, query, args...)
}
// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
// of an *sql.Tx.
func (db *DB) MustBegin() *Tx {
tx, err := db.Beginx()
if err != nil {
panic(err)
}
return tx
}
// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
func (db *DB) Beginx() (*Tx, error) {
tx, err := db.DB.Begin()
if err != nil {
return nil, err
}
return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// Queryx queries the database and returns an *sqlx.Rows.
func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
r, err := db.DB.Query(query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// QueryRowx queries the database and returns an *sqlx.Row.
func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
rows, err := db.DB.Query(query, args...)
return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
}
// MustExec (panic) runs MustExec using this database.
func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
return MustExec(db, query, args...)
}
// Preparex returns an sqlx.Stmt instead of a sql.Stmt
func (db *DB) Preparex(query string) (*Stmt, error) {
return Preparex(db, query)
}
// PrepareNamed returns an sqlx.NamedStmt
func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
return prepareNamed(db, query)
}
// Tx is an sqlx wrapper around sql.Tx with extra functionality
type Tx struct {
*sql.Tx
driverName string
unsafe bool
Mapper *reflectx.Mapper
}
// DriverName returns the driverName used by the DB which began this transaction.
func (tx *Tx) DriverName() string {
return tx.driverName
}
// Rebind a query within a transaction's bindvar type.
func (tx *Tx) Rebind(query string) string {
return Rebind(BindType(tx.driverName), query)
}
// Unsafe returns a version of Tx which will silently succeed to scan when
// columns in the SQL result have no fields in the destination struct.
func (tx *Tx) Unsafe() *Tx {
return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
}
// BindNamed binds a query within a transaction's bindvar type.
func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
}
// NamedQuery within a transaction.
func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
return NamedQuery(tx, query, arg)
}
// NamedExec a named query within a transaction.
func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
return NamedExec(tx, query, arg)
}
// Select within a transaction.
func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
return Select(tx, dest, query, args...)
}
// Queryx within a transaction.
func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
r, err := tx.Tx.Query(query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
}
// QueryRowx within a transaction.
func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
rows, err := tx.Tx.Query(query, args...)
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
}
// Get within a transaction.
func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
return Get(tx, dest, query, args...)
}
// MustExec runs MustExec within a transaction.
func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
return MustExec(tx, query, args...)
}
// Preparex a statement within a transaction.
func (tx *Tx) Preparex(query string) (*Stmt, error) {
return Preparex(tx, query)
}
// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
// stmt can be either *sql.Stmt or *sqlx.Stmt.
func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
var s *sql.Stmt
switch v := stmt.(type) {
case Stmt:
s = v.Stmt
case *Stmt:
s = v.Stmt
case sql.Stmt:
s = &v
case *sql.Stmt:
s = v
default:
panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
}
return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
}
// NamedStmt returns a version of the prepared statement which runs within a transaction.
func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
return &NamedStmt{
QueryString: stmt.QueryString,
Params: stmt.Params,
Stmt: tx.Stmtx(stmt.Stmt),
}
}
// PrepareNamed returns an sqlx.NamedStmt
func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
return prepareNamed(tx, query)
}
// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
type Stmt struct {
*sql.Stmt
unsafe bool
Mapper *reflectx.Mapper
}
// Unsafe returns a version of Stmt which will silently succeed to scan when
// columns in the SQL result have no fields in the destination struct.
func (s *Stmt) Unsafe() *Stmt {
return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
}
// Select using the prepared statement.
func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
return Select(&qStmt{s}, dest, "", args...)
}
// Get using the prepared statement.
func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
return Get(&qStmt{s}, dest, "", args...)
}
// MustExec (panic) using this statement. Note that the query portion of the error
// output will be blank, as Stmt does not expose its query.
func (s *Stmt) MustExec(args ...interface{}) sql.Result {
return MustExec(&qStmt{s}, "", args...)
}
// QueryRowx using this statement.
func (s *Stmt) QueryRowx(args ...interface{}) *Row {
qs := &qStmt{s}
return qs.QueryRowx("", args...)
}
// Queryx using this statement.
func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
qs := &qStmt{s}
return qs.Queryx("", args...)
}
// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
// implementing those interfaces and ignoring the `query` argument.
type qStmt struct{ *Stmt }
func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
return q.Stmt.Query(args...)
}
func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
r, err := q.Stmt.Query(args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
}
func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
rows, err := q.Stmt.Query(args...)
return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
}
func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
return q.Stmt.Exec(args...)
}
// Rows is a wrapper around sql.Rows which caches costly reflect operations
// during a looped StructScan
type Rows struct {
*sql.Rows
unsafe bool
Mapper *reflectx.Mapper
// these fields cache memory use for a rows during iteration w/ structScan
started bool
fields [][]int
values []interface{}
}
// SliceScan using this Rows.
func (r *Rows) SliceScan() ([]interface{}, error) {
return SliceScan(r)
}
// MapScan using this Rows.
func (r *Rows) MapScan(dest map[string]interface{}) error {
return MapScan(r, dest)
}
// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
// Use this and iterate over Rows manually when the memory load of Select() might be
// prohibitive. *Rows.StructScan caches the reflect work of matching up column
// positions to fields to avoid that overhead per scan, which means it is not safe
// to run StructScan on the same Rows instance with different struct types.
func (r *Rows) StructScan(dest interface{}) error {
v := reflect.ValueOf(dest)
if v.Kind() != reflect.Ptr {
return errors.New("must pass a pointer, not a value, to StructScan destination")
}
v = reflect.Indirect(v)
if !r.started {
columns, err := r.Columns()
if err != nil {
return err
}
m := r.Mapper
r.fields = m.TraversalsByName(v.Type(), columns)
// if we are not unsafe and are missing fields, return an error
if f, err := missingFields(r.fields); err != nil && !r.unsafe {
return fmt.Errorf("missing destination name %s", columns[f])
}
r.values = make([]interface{}, len(columns))
r.started = true
}
err := fieldsByTraversal(v, r.fields, r.values, true)
if err != nil {
return err
}
// scan into the struct field pointers and append to our results
err = r.Scan(r.values...)
if err != nil {
return err
}
return r.Err()
}
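// Illustrative sketch of the manual iteration described above (Place is an
// assumed destination struct, not part of this file):
//
//	rows, err := db.Queryx("SELECT * FROM place")
//	if err != nil {
//		return err
//	}
//	defer rows.Close()
//	for rows.Next() {
//		var p Place
//		if err := rows.StructScan(&p); err != nil {
//			return err
//		}
//		// use p here without holding the whole result set in memory
//	}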
// Connect to a database and verify with a ping.
func Connect(driverName, dataSourceName string) (*DB, error) {
db, err := Open(driverName, dataSourceName)
if err != nil {
return db, err
}
err = db.Ping()
return db, err
}
// MustConnect connects to a database and panics on error.
func MustConnect(driverName, dataSourceName string) *DB {
db, err := Connect(driverName, dataSourceName)
if err != nil {
panic(err)
}
return db
}
// Preparex prepares a statement.
func Preparex(p Preparer, query string) (*Stmt, error) {
s, err := p.Prepare(query)
if err != nil {
return nil, err
}
return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
}
// Select executes a query using the provided Queryer, and StructScans each row
// into dest, which must be a slice. If the slice elements are scannable, then
// the result set must have only one column. Otherwise, StructScan is used.
// The *sql.Rows are closed automatically.
func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
rows, err := q.Queryx(query, args...)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// Get does a QueryRow using the provided Queryer, and scans the resulting row
// to dest. If dest is scannable, the result must only have one column. Otherwise,
// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
r := q.QueryRowx(query, args...)
return r.scanAny(dest, false)
}
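// Hedged usage sketch for Select and Get (Person and the query text are
// assumptions, not part of this file):
//
//	var people []Person
//	err := Select(db, &people, "SELECT * FROM person ORDER BY id")
//
//	var one Person
//	err = Get(db, &one, "SELECT * FROM person WHERE id=$1", 7)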
// LoadFile exec's every statement in a file (as a single call to Exec).
// LoadFile may return a nil *sql.Result if errors are encountered locating or
// reading the file at path. LoadFile reads the entire file into memory, so it
// is not suitable for loading large data dumps, but can be useful for initializing
// schemas or loading indexes.
//
// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
// this by requiring something with DriverName() and then attempting to split the
// queries will be difficult to get right, and its current driver-specific behavior
// is deemed at least not complex in its incorrectness.
func LoadFile(e Execer, path string) (*sql.Result, error) {
realpath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
contents, err := ioutil.ReadFile(realpath)
if err != nil {
return nil, err
}
res, err := e.Exec(string(contents))
return &res, err
}
// MustExec execs the query using e and panics if there was an error.
func MustExec(e Execer, query string, args ...interface{}) sql.Result {
res, err := e.Exec(query, args...)
if err != nil {
panic(err)
}
return res
}
// SliceScan using this Rows.
func (r *Row) SliceScan() ([]interface{}, error) {
return SliceScan(r)
}
// MapScan using this Rows.
func (r *Row) MapScan(dest map[string]interface{}) error {
return MapScan(r, dest)
}
func (r *Row) scanAny(dest interface{}, structOnly bool) error {
if r.err != nil {
return r.err
}
defer r.rows.Close()
v := reflect.ValueOf(dest)
if v.Kind() != reflect.Ptr {
return errors.New("must pass a pointer, not a value, to StructScan destination")
}
if v.IsNil() {
return errors.New("nil pointer passed to StructScan destination")
}
base := reflectx.Deref(v.Type())
scannable := isScannable(base)
if structOnly && scannable {
return structOnlyError(base)
}
columns, err := r.Columns()
if err != nil {
return err
}
if scannable && len(columns) > 1 {
return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
}
if scannable {
return r.Scan(dest)
}
m := r.Mapper
fields := m.TraversalsByName(v.Type(), columns)
// if we are not unsafe and are missing fields, return an error
if f, err := missingFields(fields); err != nil && !r.unsafe {
return fmt.Errorf("missing destination name %s", columns[f])
}
values := make([]interface{}, len(columns))
err = fieldsByTraversal(v, fields, values, true)
if err != nil {
return err
}
// scan into the struct field pointers and append to our results
return r.Scan(values...)
}
// StructScan a single Row into dest.
func (r *Row) StructScan(dest interface{}) error {
return r.scanAny(dest, true)
}
// SliceScan a row, returning a []interface{} with values similar to MapScan.
// This function is primarily intended for use where the number of columns
// is not known. Because you can pass an []interface{} directly to Scan,
// it's recommended that you do that as it will not have to allocate new
// slices per row.
func SliceScan(r ColScanner) ([]interface{}, error) {
// ignore r.started, since we needn't use reflect for anything.
columns, err := r.Columns()
if err != nil {
return []interface{}{}, err
}
values := make([]interface{}, len(columns))
for i := range values {
values[i] = new(interface{})
}
err = r.Scan(values...)
if err != nil {
return values, err
}
for i := range columns {
values[i] = *(values[i].(*interface{}))
}
return values, r.Err()
}
// MapScan scans a single Row into the dest map[string]interface{}.
// Use this to get results for SQL that might not be under your control
// (for instance, if you're building an interface for an SQL server that
// executes SQL from input). Please do not use this as a primary interface!
// This will modify the map sent to it in place, so reuse the same map with
// care. Columns which occur more than once in the result will overwrite
// each other!
func MapScan(r ColScanner, dest map[string]interface{}) error {
// ignore r.started, since we needn't use reflect for anything.
columns, err := r.Columns()
if err != nil {
return err
}
values := make([]interface{}, len(columns))
for i := range values {
values[i] = new(interface{})
}
err = r.Scan(values...)
if err != nil {
return err
}
for i, column := range columns {
dest[column] = *(values[i].(*interface{}))
}
return r.Err()
}
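// Illustrative sketch of MapScan against an arbitrary query (assumed schema,
// not part of this file):
//
//	rows, err := db.Queryx("SELECT * FROM place")
//	if err != nil {
//		return err
//	}
//	for rows.Next() {
//		m := map[string]interface{}{}
//		if err := rows.MapScan(m); err != nil {
//			return err
//		}
//		// m now maps each column name to its value for this row
//	}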
type rowsi interface {
Close() error
Columns() ([]string, error)
Err() error
Next() bool
Scan(...interface{}) error
}
// structOnlyError returns an error appropriate for type when a non-scannable
// struct is expected but something else is given
func structOnlyError(t reflect.Type) error {
isStruct := t.Kind() == reflect.Struct
isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
if !isStruct {
return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
}
if isScanner {
return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
}
return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
}
// scanAll scans all rows into a destination, which must be a slice of any
// type. If the destination slice type is a Struct, then StructScan will be
// used on each row. If the destination is some other kind of base type, then
// each row must only have one column which can scan into that type. This
// allows you to do something like:
//
// rows, _ := db.Query("select id from people;")
// var ids []int
// scanAll(rows, &ids, false)
//
// and ids will be a list of the id results. I realize that this is a desirable
// interface to expose to users, but for now it will only be exposed via changes
// to `Get` and `Select`. The reason that this has been implemented like this is
// this is the only way to not duplicate reflect work in the new API while
// maintaining backwards compatibility.
func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
var v, vp reflect.Value
value := reflect.ValueOf(dest)
// json.Unmarshal returns errors for these
if value.Kind() != reflect.Ptr {
return errors.New("must pass a pointer, not a value, to StructScan destination")
}
if value.IsNil() {
return errors.New("nil pointer passed to StructScan destination")
}
direct := reflect.Indirect(value)
slice, err := baseType(value.Type(), reflect.Slice)
if err != nil {
return err
}
isPtr := slice.Elem().Kind() == reflect.Ptr
base := reflectx.Deref(slice.Elem())
scannable := isScannable(base)
if structOnly && scannable {
return structOnlyError(base)
}
columns, err := rows.Columns()
if err != nil {
return err
}
// if it's a base type make sure it only has 1 column; if not return an error
if scannable && len(columns) > 1 {
return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
}
if !scannable {
var values []interface{}
var m *reflectx.Mapper
switch rows.(type) {
case *Rows:
m = rows.(*Rows).Mapper
default:
m = mapper()
}
fields := m.TraversalsByName(base, columns)
// if we are not unsafe and are missing fields, return an error
if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
return fmt.Errorf("missing destination name %s", columns[f])
}
values = make([]interface{}, len(columns))
for rows.Next() {
// create a new struct type (which returns PtrTo) and indirect it
vp = reflect.New(base)
v = reflect.Indirect(vp)
err = fieldsByTraversal(v, fields, values, true)
if err != nil {
return err
}
// scan into the struct field pointers and append to our results
err = rows.Scan(values...)
if err != nil {
return err
}
if isPtr {
direct.Set(reflect.Append(direct, vp))
} else {
direct.Set(reflect.Append(direct, v))
}
}
} else {
for rows.Next() {
vp = reflect.New(base)
err = rows.Scan(vp.Interface())
if err != nil {
return err
}
// append
if isPtr {
direct.Set(reflect.Append(direct, vp))
} else {
direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
}
}
}
return rows.Err()
}
// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
// it doesn't really feel like it's named properly. There is an incongruency
// between this and the way that StructScan (which might better be ScanStruct
// anyway) works on a rows object.
// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
// StructScan will scan in the entire rows result, so if you do not want to
// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
func StructScan(rows rowsi, dest interface{}) error {
return scanAll(rows, dest, true)
}
// reflect helpers
func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
t = reflectx.Deref(t)
if t.Kind() != expected {
return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
}
return t, nil
}
// fieldsByName fills a values interface with fields from the passed value based
// on the traversals in int. If ptrs is true, return addresses instead of values.
// We write this instead of using FieldsByName to save allocations and map lookups
// when iterating over many rows. Empty traversals will get an interface pointer.
// Because of the necessity of requesting ptrs or values, it's considered a bit too
// specialized for inclusion in reflectx itself.
func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
v = reflect.Indirect(v)
if v.Kind() != reflect.Struct {
return errors.New("argument not a struct")
}
for i, traversal := range traversals {
if len(traversal) == 0 {
values[i] = new(interface{})
continue
}
f := reflectx.FieldByIndexes(v, traversal)
if ptrs {
values[i] = f.Addr().Interface()
} else {
values[i] = f.Interface()
}
}
return nil
}
func missingFields(transversals [][]int) (field int, err error) {
for i, t := range transversals {
if len(t) == 0 {
return i, errors.New("missing field")
}
}
return 0, nil
}

1674
vendor/github.com/jmoiron/sqlx/sqlx_test.go generated vendored Normal file

File diff suppressed because it is too large

5
vendor/github.com/jmoiron/sqlx/types/README.md generated vendored Normal file
View File

@ -0,0 +1,5 @@
# types
The types package provides some useful types which implement the `sql.Scanner`
and `driver.Valuer` interfaces, suitable for use as scan and value targets with
database/sql.
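A minimal, standalone sketch of `JSONText` outside of a database round trip
(the values here are arbitrary):

```Go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx/types"
)

func main() {
	j := types.JSONText(`{"answer": 42}`)

	// Value() re-validates the JSON before handing it to database/sql.
	if _, err := j.Value(); err != nil {
		fmt.Println("invalid json:", err)
		return
	}

	var decoded map[string]interface{}
	if err := j.Unmarshal(&decoded); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(decoded["answer"]) // 42
}
```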

106
vendor/github.com/jmoiron/sqlx/types/types.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
package types
import (
"bytes"
"compress/gzip"
"database/sql/driver"
"encoding/json"
"errors"
"io/ioutil"
)
// GzippedText is a []byte which transparently gzips data being submitted to
// a database and ungzips data being Scanned from a database.
type GzippedText []byte
// Value implements the driver.Valuer interface, gzipping the raw value of
// this GzippedText.
func (g GzippedText) Value() (driver.Value, error) {
b := make([]byte, 0, len(g))
buf := bytes.NewBuffer(b)
w := gzip.NewWriter(buf)
if _, err := w.Write(g); err != nil {
return nil, err
}
if err := w.Close(); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// Scan implements the sql.Scanner interface, ungzipping the value coming off
// the wire and storing the raw result in the GzippedText.
func (g *GzippedText) Scan(src interface{}) error {
var source []byte
switch src.(type) {
case string:
source = []byte(src.(string))
case []byte:
source = src.([]byte)
default:
return errors.New("Incompatible type for GzippedText")
}
reader, err := gzip.NewReader(bytes.NewReader(source))
if err != nil {
return err
}
defer reader.Close()
b, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
*g = GzippedText(b)
return nil
}
// JSONText is a json.RawMessage, which is a []byte underneath.
// Value() validates the json format in the source, and returns an error if
// the json is not valid. Scan does no validation. JSONText additionally
// implements `Unmarshal`, which unmarshals the json within to an interface{}
type JSONText json.RawMessage
// MarshalJSON returns the *j as the JSON encoding of j.
func (j *JSONText) MarshalJSON() ([]byte, error) {
return *j, nil
}
// UnmarshalJSON sets *j to a copy of data
func (j *JSONText) UnmarshalJSON(data []byte) error {
if j == nil {
return errors.New("JSONText: UnmarshalJSON on nil pointer")
}
*j = append((*j)[0:0], data...)
return nil
}
// Value returns j as a value. This does a validating unmarshal into another
// RawMessage. If j is invalid json, it returns an error.
func (j JSONText) Value() (driver.Value, error) {
var m json.RawMessage
var err = j.Unmarshal(&m)
if err != nil {
return []byte{}, err
}
return []byte(j), nil
}
// Scan stores the src in *j. No validation is done.
func (j *JSONText) Scan(src interface{}) error {
var source []byte
switch src.(type) {
case string:
source = []byte(src.(string))
case []byte:
source = src.([]byte)
default:
return errors.New("Incompatible type for JSONText")
}
*j = JSONText(append((*j)[0:0], source...))
return nil
}
// Unmarshal unmarshal's the json in j to v, as in json.Unmarshal.
func (j *JSONText) Unmarshal(v interface{}) error {
return json.Unmarshal([]byte(*j), v)
}
// Pretty printing for JSONText types
func (j JSONText) String() string {
return string(j)
}

42
vendor/github.com/jmoiron/sqlx/types/types_test.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package types
import "testing"
func TestGzipText(t *testing.T) {
g := GzippedText("Hello, world")
v, err := g.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&g).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
if string(g) != "Hello, world" {
t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g))
}
}
func TestJSONText(t *testing.T) {
j := JSONText(`{"foo": 1, "bar": 2}`)
v, err := j.Value()
if err != nil {
t.Errorf("Was not expecting an error")
}
err = (&j).Scan(v)
if err != nil {
t.Errorf("Was not expecting an error")
}
m := map[string]interface{}{}
j.Unmarshal(&m)
if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 {
t.Errorf("Expected valid json but got some garbage instead? %#v", m)
}
j = JSONText(`{"foo": 1, invalid, false}`)
v, err = j.Value()
if err == nil {
t.Errorf("Was expecting invalid json to fail!")
}
}

7
vendor/github.com/kelseyhightower/envconfig/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,7 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

19
vendor/github.com/kelseyhightower/envconfig/LICENSE generated vendored Normal file
View File

@ -0,0 +1,19 @@
Copyright (c) 2013 Kelsey Hightower
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

2
vendor/github.com/kelseyhightower/envconfig/MAINTAINERS generated vendored Normal file
View File

@ -0,0 +1,2 @@
Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
Travis Parker travis.parker@gmail.com github.com/teepark

161
vendor/github.com/kelseyhightower/envconfig/README.md generated vendored Normal file
View File

@ -0,0 +1,161 @@
# envconfig
[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
```Go
import "github.com/kelseyhightower/envconfig"
```
## Documentation
See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
## Usage
Set some environment variables:
```Bash
export MYAPP_DEBUG=false
export MYAPP_PORT=8080
export MYAPP_USER=Kelsey
export MYAPP_RATE="0.5"
export MYAPP_TIMEOUT="3m"
export MYAPP_USERS="rob,ken,robert"
```
Write some code:
```Go
package main
import (
"fmt"
"log"
"time"
"github.com/kelseyhightower/envconfig"
)
type Specification struct {
Debug bool
Port int
User string
Users []string
Rate float32
Timeout time.Duration
}
func main() {
var s Specification
err := envconfig.Process("myapp", &s)
if err != nil {
log.Fatal(err.Error())
}
format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
_, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
if err != nil {
log.Fatal(err.Error())
}
fmt.Println("Users:")
for _, u := range s.Users {
fmt.Printf(" %s\n", u)
}
}
```
Results:
```Bash
Debug: false
Port: 8080
User: Kelsey
Rate: 0.500000
Timeout: 3m0s
Users:
rob
ken
robert
```
## Struct Tag Support
Envconfig supports the use of struct tags to specify alternate, default, and required
environment variables.
For example, consider the following struct:
```Go
type Specification struct {
MultiWordVar string `envconfig:"multi_word_var"`
DefaultVar string `default:"foobar"`
RequiredVar string `required:"true"`
IgnoredVar string `ignored:"true"`
}
```
Envconfig will process the value for `MultiWordVar` by populating it with the
value for `MYAPP_MULTI_WORD_VAR`.
```Bash
export MYAPP_MULTI_WORD_VAR="this will be the value"
# export MYAPP_MULTIWORDVAR="and this will not"
```
If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
it will populate it with "foobar" as a default value.
If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
it will return an error when asked to process the struct.
If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
is a struct tag defined, it will try to populate your variable with an environment
variable that directly matches the envconfig tag in your struct definition:
```shell
export SERVICE_HOST=127.0.0.1
export MYAPP_DEBUG=true
```
```Go
type Specification struct {
ServiceHost string `envconfig:"SERVICE_HOST"`
Debug bool
}
```
Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
environment variable is set.
## Supported Struct Field Types
envconfig supports these struct field types:
* string
* int8, int16, int32, int64
* bool
* float32, float64
Embedded structs using these fields are also supported.
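As a brief sketch (the struct and field names here are illustrative, not part
of the package), fields of an embedded struct are looked up under the same
prefix:

```Go
type Server struct {
	Host string
	Port int
}

type Config struct {
	Server      // resolved as MYAPP_HOST and MYAPP_PORT
	Debug  bool
}
```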
## Custom Decoders
Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
control its own deserialization:
```Bash
export DNS_SERVER=8.8.8.8
```
```Go
type IPDecoder net.IP
func (ipd *IPDecoder) Decode(value string) error {
*ipd = IPDecoder(net.ParseIP(value))
return nil
}
type DNSConfig struct {
Address IPDecoder `envconfig:"DNS_SERVER"`
}
```
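A hedged sketch of how such a decoder might be driven (the `myapp` prefix is
arbitrary; `DNS_SERVER` is picked up without the prefix because of the
`envconfig` tag):

```Go
var cfg DNSConfig
if err := envconfig.Process("myapp", &cfg); err != nil {
	log.Fatal(err)
}
// cfg.Address was filled in by IPDecoder.Decode.
```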

8
vendor/github.com/kelseyhightower/envconfig/doc.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
// Package envconfig implements decoding of environment variables based on a user
// defined specification. A typical use is using environment variables for
// configuration settings.
package envconfig

201
vendor/github.com/kelseyhightower/envconfig/envconfig.go generated vendored Normal file
View File

@ -0,0 +1,201 @@
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"syscall"
"time"
)
// ErrInvalidSpecification indicates that a specification is of the wrong type.
var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
// A ParseError occurs when an environment variable cannot be converted to
// the type required by a struct field during assignment.
type ParseError struct {
KeyName string
FieldName string
TypeName string
Value string
}
// A Decoder is a type that knows how to de-serialize environment variables
// into itself.
type Decoder interface {
Decode(value string) error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s", e.KeyName, e.FieldName, e.Value, e.TypeName)
}
// Process populates the specified struct based on environment variables
func Process(prefix string, spec interface{}) error {
s := reflect.ValueOf(spec)
if s.Kind() != reflect.Ptr {
return ErrInvalidSpecification
}
s = s.Elem()
if s.Kind() != reflect.Struct {
return ErrInvalidSpecification
}
typeOfSpec := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
if !f.CanSet() || typeOfSpec.Field(i).Tag.Get("ignored") == "true" {
continue
}
if typeOfSpec.Field(i).Anonymous && f.Kind() == reflect.Struct {
embeddedPtr := f.Addr().Interface()
if err := Process(prefix, embeddedPtr); err != nil {
return err
}
f.Set(reflect.ValueOf(embeddedPtr).Elem())
}
alt := typeOfSpec.Field(i).Tag.Get("envconfig")
fieldName := typeOfSpec.Field(i).Name
if alt != "" {
fieldName = alt
}
key := strings.ToUpper(fmt.Sprintf("%s_%s", prefix, fieldName))
// `os.Getenv` cannot differentiate between an explicitly set empty value
// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
// but it is only available in go1.5 or newer.
value, ok := syscall.Getenv(key)
if !ok && alt != "" {
key := strings.ToUpper(fieldName)
value, ok = syscall.Getenv(key)
}
def := typeOfSpec.Field(i).Tag.Get("default")
if def != "" && !ok {
value = def
}
req := typeOfSpec.Field(i).Tag.Get("required")
if !ok && def == "" {
if req == "true" {
return fmt.Errorf("required key %s missing value", key)
}
continue
}
err := processField(value, f)
if err != nil {
return &ParseError{
KeyName: key,
FieldName: fieldName,
TypeName: f.Type().String(),
Value: value,
}
}
}
return nil
}
// MustProcess is the same as Process but panics if an error occurs
func MustProcess(prefix string, spec interface{}) {
if err := Process(prefix, spec); err != nil {
panic(err)
}
}
func processField(value string, field reflect.Value) error {
typ := field.Type()
decoder := decoderFrom(field)
if decoder != nil {
return decoder.Decode(value)
}
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
if field.IsNil() {
field.Set(reflect.New(typ))
}
field = field.Elem()
}
switch typ.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var (
val int64
err error
)
if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
var d time.Duration
d, err = time.ParseDuration(value)
val = int64(d)
} else {
val, err = strconv.ParseInt(value, 0, typ.Bits())
}
if err != nil {
return err
}
field.SetInt(val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val, err := strconv.ParseUint(value, 0, typ.Bits())
if err != nil {
return err
}
field.SetUint(val)
case reflect.Bool:
val, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(val)
case reflect.Float32, reflect.Float64:
val, err := strconv.ParseFloat(value, typ.Bits())
if err != nil {
return err
}
field.SetFloat(val)
case reflect.Slice:
vals := strings.Split(value, ",")
sl := reflect.MakeSlice(typ, len(vals), len(vals))
for i, val := range vals {
err := processField(val, sl.Index(i))
if err != nil {
return err
}
}
field.Set(sl)
}
return nil
}
func decoderFrom(field reflect.Value) Decoder {
if field.CanInterface() {
dec, ok := field.Interface().(Decoder)
if ok {
return dec
}
}
// also check if pointer-to-type implements Decoder,
// and we can get a pointer to our field
if field.CanAddr() {
field = field.Addr()
dec, ok := field.Interface().(Decoder)
if ok {
return dec
}
}
return nil
}

493
vendor/github.com/kelseyhightower/envconfig/envconfig_test.go generated vendored Normal file
View File

@ -0,0 +1,493 @@
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"os"
"testing"
"time"
)
type Specification struct {
Embedded
EmbeddedButIgnored `ignored:"true"`
Debug bool
Port int
Rate float32
User string
TTL uint32
Timeout time.Duration
AdminUsers []string
MagicNumbers []int
MultiWordVar string
SomePointer *string
SomePointerWithDefault *string `default:"foo2baz"`
MultiWordVarWithAlt string `envconfig:"MULTI_WORD_VAR_WITH_ALT"`
MultiWordVarWithLowerCaseAlt string `envconfig:"multi_word_var_with_lower_case_alt"`
NoPrefixWithAlt string `envconfig:"SERVICE_HOST"`
DefaultVar string `default:"foobar"`
RequiredVar string `required:"true"`
NoPrefixDefault string `envconfig:"BROKER" default:"127.0.0.1"`
RequiredDefault string `required:"true" default:"foo2bar"`
Ignored string `ignored:"true"`
}
type Embedded struct {
Enabled bool
EmbeddedPort int
MultiWordVar string
MultiWordVarWithAlt string `envconfig:"MULTI_WITH_DIFFERENT_ALT"`
EmbeddedAlt string `envconfig:"EMBEDDED_WITH_ALT"`
EmbeddedIgnored string `ignored:"true"`
}
type EmbeddedButIgnored struct {
FirstEmbeddedButIgnored string
SecondEmbeddedButIgnored string
}
func TestProcess(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_DEBUG", "true")
os.Setenv("ENV_CONFIG_PORT", "8080")
os.Setenv("ENV_CONFIG_RATE", "0.5")
os.Setenv("ENV_CONFIG_USER", "Kelsey")
os.Setenv("ENV_CONFIG_TIMEOUT", "2m")
os.Setenv("ENV_CONFIG_ADMINUSERS", "John,Adam,Will")
os.Setenv("ENV_CONFIG_MAGICNUMBERS", "5,10,20")
os.Setenv("SERVICE_HOST", "127.0.0.1")
os.Setenv("ENV_CONFIG_TTL", "30")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
os.Setenv("ENV_CONFIG_IGNORED", "was-not-ignored")
err := Process("env_config", &s)
if err != nil {
t.Error(err.Error())
}
if s.NoPrefixWithAlt != "127.0.0.1" {
t.Errorf("expected %v, got %v", "127.0.0.1", s.NoPrefixWithAlt)
}
if !s.Debug {
t.Errorf("expected %v, got %v", true, s.Debug)
}
if s.Port != 8080 {
t.Errorf("expected %d, got %v", 8080, s.Port)
}
if s.Rate != 0.5 {
t.Errorf("expected %f, got %v", 0.5, s.Rate)
}
if s.TTL != 30 {
t.Errorf("expected %d, got %v", 30, s.TTL)
}
if s.User != "Kelsey" {
t.Errorf("expected %s, got %s", "Kelsey", s.User)
}
if s.Timeout != 2*time.Minute {
t.Errorf("expected %s, got %s", 2*time.Minute, s.Timeout)
}
if s.RequiredVar != "foo" {
t.Errorf("expected %s, got %s", "foo", s.RequiredVar)
}
if len(s.AdminUsers) != 3 ||
s.AdminUsers[0] != "John" ||
s.AdminUsers[1] != "Adam" ||
s.AdminUsers[2] != "Will" {
t.Errorf("expected %#v, got %#v", []string{"John", "Adam", "Will"}, s.AdminUsers)
}
if len(s.MagicNumbers) != 3 ||
s.MagicNumbers[0] != 5 ||
s.MagicNumbers[1] != 10 ||
s.MagicNumbers[2] != 20 {
t.Errorf("expected %#v, got %#v", []int{5, 10, 20}, s.MagicNumbers)
}
if s.Ignored != "" {
t.Errorf("expected empty string, got %#v", s.Ignored)
}
}
func TestParseErrorBool(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_DEBUG", "string")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
err := Process("env_config", &s)
v, ok := err.(*ParseError)
if !ok {
t.Errorf("expected ParseError, got %v", v)
}
if v.FieldName != "Debug" {
t.Errorf("expected %s, got %v", "Debug", v.FieldName)
}
if s.Debug != false {
t.Errorf("expected %v, got %v", false, s.Debug)
}
}
func TestParseErrorFloat32(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_RATE", "string")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
err := Process("env_config", &s)
v, ok := err.(*ParseError)
if !ok {
t.Errorf("expected ParseError, got %v", v)
}
if v.FieldName != "Rate" {
t.Errorf("expected %s, got %v", "Rate", v.FieldName)
}
if s.Rate != 0 {
t.Errorf("expected %v, got %v", 0, s.Rate)
}
}
func TestParseErrorInt(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_PORT", "string")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
err := Process("env_config", &s)
v, ok := err.(*ParseError)
if !ok {
t.Errorf("expected ParseError, got %v", v)
}
if v.FieldName != "Port" {
t.Errorf("expected %s, got %v", "Port", v.FieldName)
}
if s.Port != 0 {
t.Errorf("expected %v, got %v", 0, s.Port)
}
}
func TestParseErrorUint(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_TTL", "-30")
err := Process("env_config", &s)
v, ok := err.(*ParseError)
if !ok {
t.Errorf("expected ParseError, got %v", v)
}
if v.FieldName != "TTL" {
t.Errorf("expected %s, got %v", "TTL", v.FieldName)
}
if s.TTL != 0 {
t.Errorf("expected %v, got %v", 0, s.TTL)
}
}
func TestErrInvalidSpecification(t *testing.T) {
m := make(map[string]string)
err := Process("env_config", &m)
if err != ErrInvalidSpecification {
t.Errorf("expected %v, got %v", ErrInvalidSpecification, err)
}
}
func TestUnsetVars(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("USER", "foo")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
// If the var is not defined the non-prefixed version should not be used
// unless the struct tag says so
if s.User != "" {
t.Errorf("expected %q, got %q", "", s.User)
}
}
func TestAlternateVarNames(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_MULTI_WORD_VAR", "foo")
os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT", "bar")
os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_LOWER_CASE_ALT", "baz")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
// Setting the alt version of the var in the environment has no effect if
// the struct tag is not supplied
if s.MultiWordVar != "" {
t.Errorf("expected %q, got %q", "", s.MultiWordVar)
}
// Setting the alt version of the var in the environment correctly sets
// the value if the struct tag IS supplied
if s.MultiWordVarWithAlt != "bar" {
t.Errorf("expected %q, got %q", "bar", s.MultiWordVarWithAlt)
}
// Alt value is not case sensitive and is treated as all uppercase
if s.MultiWordVarWithLowerCaseAlt != "baz" {
t.Errorf("expected %q, got %q", "baz", s.MultiWordVarWithLowerCaseAlt)
}
}
func TestRequiredVar(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foobar")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.RequiredVar != "foobar" {
t.Errorf("expected %s, got %s", "foobar", s.RequiredVar)
}
}
func TestBlankDefaultVar(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "requiredvalue")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.DefaultVar != "foobar" {
t.Errorf("expected %s, got %s", "foobar", s.DefaultVar)
}
if *s.SomePointerWithDefault != "foo2baz" {
t.Errorf("expected %s, got %s", "foo2baz", *s.SomePointerWithDefault)
}
}
func TestNonBlankDefaultVar(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_DEFAULTVAR", "nondefaultval")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "requiredvalue")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.DefaultVar != "nondefaultval" {
t.Errorf("expected %s, got %s", "nondefaultval", s.DefaultVar)
}
}
func TestExplicitBlankDefaultVar(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_DEFAULTVAR", "")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.DefaultVar != "" {
t.Errorf("expected %s, got %s", "\"\"", s.DefaultVar)
}
}
func TestAlternateNameDefaultVar(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("BROKER", "betterbroker")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.NoPrefixDefault != "betterbroker" {
t.Errorf("expected %q, got %q", "betterbroker", s.NoPrefixDefault)
}
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.NoPrefixDefault != "127.0.0.1" {
t.Errorf("expected %q, got %q", "127.0.0.1", s.NoPrefixDefault)
}
}
func TestRequiredDefault(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.RequiredDefault != "foo2bar" {
t.Errorf("expected %q, got %q", "foo2bar", s.RequiredDefault)
}
}
func TestPointerFieldBlank(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.SomePointer != nil {
t.Errorf("expected <nil>, got %2", *s.SomePointer)
}
}
func TestMustProcess(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_DEBUG", "true")
os.Setenv("ENV_CONFIG_PORT", "8080")
os.Setenv("ENV_CONFIG_RATE", "0.5")
os.Setenv("ENV_CONFIG_USER", "Kelsey")
os.Setenv("SERVICE_HOST", "127.0.0.1")
os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
MustProcess("env_config", &s)
defer func() {
if err := recover(); err != nil {
return
}
t.Error("expected panic")
}()
m := make(map[string]string)
MustProcess("env_config", &m)
}
func TestEmbeddedStruct(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "required")
os.Setenv("ENV_CONFIG_ENABLED", "true")
os.Setenv("ENV_CONFIG_EMBEDDEDPORT", "1234")
os.Setenv("ENV_CONFIG_MULTIWORDVAR", "foo")
os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT", "bar")
os.Setenv("ENV_CONFIG_MULTI_WITH_DIFFERENT_ALT", "baz")
os.Setenv("ENV_CONFIG_EMBEDDED_WITH_ALT", "foobar")
os.Setenv("ENV_CONFIG_SOMEPOINTER", "foobaz")
os.Setenv("ENV_CONFIG_EMBEDDED_IGNORED", "was-not-ignored")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if !s.Enabled {
t.Errorf("expected %v, got %v", true, s.Enabled)
}
if s.EmbeddedPort != 1234 {
t.Errorf("expected %d, got %v", 1234, s.EmbeddedPort)
}
if s.MultiWordVar != "foo" {
t.Errorf("expected %s, got %s", "foo", s.MultiWordVar)
}
if s.Embedded.MultiWordVar != "foo" {
t.Errorf("expected %s, got %s", "foo", s.Embedded.MultiWordVar)
}
if s.MultiWordVarWithAlt != "bar" {
t.Errorf("expected %s, got %s", "bar", s.MultiWordVarWithAlt)
}
if s.Embedded.MultiWordVarWithAlt != "baz" {
t.Errorf("expected %s, got %s", "baz", s.Embedded.MultiWordVarWithAlt)
}
if s.EmbeddedAlt != "foobar" {
t.Errorf("expected %s, got %s", "foobar", s.EmbeddedAlt)
}
if *s.SomePointer != "foobaz" {
t.Errorf("expected %s, got %s", "foobaz", *s.SomePointer)
}
if s.EmbeddedIgnored != "" {
t.Errorf("expected empty string, got %#v", s.Ignored)
}
}
func TestEmbeddedButIgnoredStruct(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "required")
os.Setenv("ENV_CONFIG_FIRSTEMBEDDEDBUTIGNORED", "was-not-ignored")
os.Setenv("ENV_CONFIG_SECONDEMBEDDEDBUTIGNORED", "was-not-ignored")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.FirstEmbeddedButIgnored != "" {
t.Errorf("expected empty string, got %#v", s.Ignored)
}
if s.SecondEmbeddedButIgnored != "" {
t.Errorf("expected empty string, got %#v", s.Ignored)
}
}
func TestNonPointerFailsProperly(t *testing.T) {
var s Specification
os.Clearenv()
os.Setenv("ENV_CONFIG_REQUIREDVAR", "snap")
err := Process("env_config", s)
if err != ErrInvalidSpecification {
t.Errorf("non-pointer should fail with ErrInvalidSpecification, was instead %s", err)
}
}
func TestCustomDecoder(t *testing.T) {
s := struct {
Foo string
Bar bracketed
}{}
os.Clearenv()
os.Setenv("ENV_CONFIG_FOO", "foo")
os.Setenv("ENV_CONFIG_BAR", "bar")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.Foo != "foo" {
t.Errorf("foo: expected 'foo', got %q", s.Foo)
}
if string(s.Bar) != "[bar]" {
t.Errorf("bar: expected '[bar]', got %q", string(s.Bar))
}
}
func TestCustomDecoderWithPointer(t *testing.T) {
s := struct {
Foo string
Bar *bracketed
}{}
// Decode would panic when b is nil, so make sure it
// has an initial value to replace.
var b bracketed = "initial_value"
s.Bar = &b
os.Clearenv()
os.Setenv("ENV_CONFIG_FOO", "foo")
os.Setenv("ENV_CONFIG_BAR", "bar")
if err := Process("env_config", &s); err != nil {
t.Error(err.Error())
}
if s.Foo != "foo" {
t.Errorf("foo: expected 'foo', got %q", s.Foo)
}
if string(*s.Bar) != "[bar]" {
t.Errorf("bar: expected '[bar]', got %q", string(*s.Bar))
}
}
type bracketed string
func (b *bracketed) Decode(value string) error {
*b = bracketed("[" + value + "]")
return nil
}
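The bracketed type above is the custom-decoder hook these tests exercise: any field whose type implements Decode(value string) error is filled through that method instead of the built-in parsers. Below is a minimal sketch of the same hook in application code; the endpoint type, the config struct, the "myapp" prefix, and the upstream import path github.com/kelseyhightower/envconfig are assumptions for illustration, not part of this vendored file.

    package main

    import (
        "fmt"
        "log"
        "net/url"

        "github.com/kelseyhightower/envconfig"
    )

    // endpoint satisfies the same Decoder hook as the bracketed type in the tests.
    type endpoint struct {
        u *url.URL
    }

    // Decode parses the raw environment value as a URL.
    func (e *endpoint) Decode(value string) error {
        u, err := url.Parse(value)
        if err != nil {
            return err
        }
        e.u = u
        return nil
    }

    type config struct {
        Upstream endpoint
    }

    func main() {
        // MYAPP_UPSTREAM=https://example.com/api would populate cfg.Upstream.
        var cfg config
        if err := envconfig.Process("myapp", &cfg); err != nil {
            log.Fatal(err)
        }
        fmt.Println(cfg.Upstream.u)
    }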

3
vendor/github.com/mattn/go-sqlite3/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
*.db
*.exe
*.dll

13
vendor/github.com/mattn/go-sqlite3/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,13 @@
language: go
sudo: required
dist: trusty
go:
- 1.5
- 1.6
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
- go test -v . -tags "libsqlite3"

21
vendor/github.com/mattn/go-sqlite3/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

81
vendor/github.com/mattn/go-sqlite3/README.md generated vendored Normal file
View File

@ -0,0 +1,81 @@
go-sqlite3
==========
[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
Description
-----------
sqlite3 driver conforming to the built-in database/sql interface
Installation
------------
This package can be installed with the go get command:
go get github.com/mattn/go-sqlite3
_go-sqlite3_ is a *cgo* package.
If you want to build your app using go-sqlite3, you need gcc.
However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore.
Documentation
-------------
API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
Examples can be found under the `./_example` directory
FAQ
---
* Want to build go-sqlite3 with libsqlite3 on Linux.
Use `go build --tags "libsqlite3 linux"`
* Want to build go-sqlite3 with libsqlite3 on OS X.
Install sqlite3 from homebrew: `brew install sqlite3`
Use `go build --tags "libsqlite3 darwin"`
* Want to build go-sqlite3 with icu extension.
Use `go build --tags "icu"`
* Can't build go-sqlite3 on 64-bit Windows.
> Probably you are using Go 1.0; Go 1.0 has a problem compiling/linking on 64-bit Windows.
> See: https://github.com/mattn/go-sqlite3/issues/27
* Getting an insert error while a query is open.
> You can pass some arguments into the connection string, for example, a URI.
> See: https://github.com/mattn/go-sqlite3/issues/39
* Do you want to cross compile? mingw on Linux or Mac?
> See: https://github.com/mattn/go-sqlite3/issues/106
> See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
* Want to get time.Time in the current locale?
Use `loc=auto` in the SQLite3 URI filename, like `file:foo.db?loc=auto`.
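A minimal sketch of `loc=auto` in use; the events table and created_at column are made up for illustration:

    package main

    import (
        "database/sql"
        "fmt"
        "log"
        "time"

        _ "github.com/mattn/go-sqlite3"
    )

    func main() {
        // loc=auto makes the driver hand back time.Time values in the local time zone.
        db, err := sql.Open("sqlite3", "file:foo.db?loc=auto")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        var created time.Time
        // Assumes a table "events" with a datetime column "created_at".
        if err := db.QueryRow("select created_at from events limit 1").Scan(&created); err != nil {
            log.Fatal(err)
        }
        fmt.Println(created.Location(), created)
    }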
License
-------
MIT: http://mattn.mit-license.org/2012
sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
The -binding suffix was added to avoid build failures under gccgo.
In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
Author
------
Yasuhiro Matsumoto (a.k.a mattn)

View File

@ -0,0 +1,133 @@
package main
import (
"database/sql"
"fmt"
"log"
"math"
"math/rand"
sqlite "github.com/mattn/go-sqlite3"
)
// Computes x^y
func pow(x, y int64) int64 {
return int64(math.Pow(float64(x), float64(y)))
}
// Computes the bitwise exclusive-or of all its arguments
func xor(xs ...int64) int64 {
var ret int64
for _, x := range xs {
ret ^= x
}
return ret
}
// Returns a random number. It's actually deterministic here because
// we don't seed the RNG, but it's an example of a non-pure function
// from SQLite's POV.
func getrand() int64 {
return rand.Int63()
}
// Computes the standard deviation of a GROUPed BY set of values
type stddev struct {
xs []int64
// Running average calculation
sum int64
n int64
}
func newStddev() *stddev { return &stddev{} }
func (s *stddev) Step(x int64) {
s.xs = append(s.xs, x)
s.sum += x
s.n++
}
func (s *stddev) Done() float64 {
mean := float64(s.sum) / float64(s.n)
var sqDiff []float64
for _, x := range s.xs {
sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2))
}
var dev float64
for _, x := range sqDiff {
dev += x
}
dev /= float64(len(sqDiff))
return math.Sqrt(dev)
}
func main() {
sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
ConnectHook: func(conn *sqlite.SQLiteConn) error {
if err := conn.RegisterFunc("pow", pow, true); err != nil {
return err
}
if err := conn.RegisterFunc("xor", xor, true); err != nil {
return err
}
if err := conn.RegisterFunc("rand", getrand, false); err != nil {
return err
}
if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
return err
}
return nil
},
})
db, err := sql.Open("sqlite3_custom", ":memory:")
if err != nil {
log.Fatal("Failed to open database:", err)
}
defer db.Close()
var i int64
err = db.QueryRow("SELECT pow(2,3)").Scan(&i)
if err != nil {
log.Fatal("POW query error:", err)
}
fmt.Println("pow(2,3) =", i) // 8
err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i)
if err != nil {
log.Fatal("XOR query error:", err)
}
fmt.Println("xor(1,2,3,4,5) =", i) // 7
err = db.QueryRow("SELECT rand()").Scan(&i)
if err != nil {
log.Fatal("RAND query error:", err)
}
fmt.Println("rand() =", i) // pseudorandom
_, err = db.Exec("create table foo (department integer, profits integer)")
if err != nil {
log.Fatal("Failed to create table:", err)
}
_, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)")
if err != nil {
log.Fatal("Failed to insert records:", err)
}
rows, err := db.Query("select department, stddev(profits) from foo group by department")
if err != nil {
log.Fatal("STDDEV query error:", err)
}
defer rows.Close()
for rows.Next() {
var dept int64
var dev float64
if err := rows.Scan(&dept, &dev); err != nil {
log.Fatal(err)
}
fmt.Printf("dept=%d stddev=%f\n", dept, dev)
}
if err := rows.Err(); err != nil {
log.Fatal(err)
}
}

View File

@ -0,0 +1,71 @@
package main
import (
"database/sql"
"github.com/mattn/go-sqlite3"
"log"
"os"
)
func main() {
sqlite3conn := []*sqlite3.SQLiteConn{}
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
return nil
},
})
os.Remove("./foo.db")
os.Remove("./bar.db")
destDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db")
if err != nil {
log.Fatal(err)
}
defer destDb.Close()
destDb.Ping()
_, err = destDb.Exec("create table foo(id int, value text)")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(1, 'foo')")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(2, 'bar')")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Query("select * from foo")
if err != nil {
log.Fatal(err)
}
srcDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db")
if err != nil {
log.Fatal(err)
}
defer srcDb.Close()
srcDb.Ping()
bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main")
if err != nil {
log.Fatal(err)
}
_, err = bk.Step(-1)
if err != nil {
log.Fatal(err)
}
_, err = destDb.Query("select * from foo")
if err != nil {
log.Fatal(err)
}
_, err = destDb.Exec("insert into foo values(3, 'bar')")
if err != nil {
log.Fatal(err)
}
bk.Finish()
}

View File

@ -0,0 +1,22 @@
ifeq ($(OS),Windows_NT)
EXE=extension.exe
EXT=sqlite3_mod_regexp.dll
RM=cmd /c del
LDFLAG=
else
EXE=extension
EXT=sqlite3_mod_regexp.so
RM=rm
LDFLAG=-fPIC
endif
all : $(EXE) $(EXT)
$(EXE) : extension.go
go build $<
$(EXT) : sqlite3_mod_regexp.c
gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre
clean :
@-$(RM) $(EXE) $(EXT)

View File

@ -0,0 +1,43 @@
package main
import (
"database/sql"
"fmt"
"github.com/mattn/go-sqlite3"
"log"
)
func main() {
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// Force db to make a new connection in pool
// by putting the original in a transaction
tx, err := db.Begin()
if err != nil {
log.Fatal(err)
}
defer tx.Commit()
// New connection works (hopefully!)
rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var helloworld string
rows.Scan(&helloworld)
fmt.Println(helloworld)
}
}

View File

@ -0,0 +1,31 @@
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL);
}

View File

@ -0,0 +1,24 @@
ifeq ($(OS),Windows_NT)
EXE=extension.exe
EXT=sqlite3_mod_vtable.dll
RM=cmd /c del
LIBCURL=-lcurldll
LDFLAG=
else
EXE=extension
EXT=sqlite3_mod_vtable.so
RM=rm
LDFLAG=-fPIC
LIBCURL=-lcurl
endif
all : $(EXE) $(EXT)
$(EXE) : extension.go
go build $<
$(EXT) : sqlite3_mod_vtable.cc
g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL)
clean :
@-$(RM) $(EXE) $(EXT)

View File

@ -0,0 +1,36 @@
package main
import (
"database/sql"
"fmt"
"github.com/mattn/go-sqlite3"
"log"
)
func main() {
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_vtable",
},
})
db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
log.Fatal(err)
}
defer db.Close()
db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
rows, err := db.Query("select id, full_name, description, html_url from repo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id, full_name, description, html_url string
rows.Scan(&id, &full_name, &description, &html_url)
fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, full_name, description, html_url)
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,238 @@
#include <string>
#include <sstream>
#include <iostream> // needed for std::cerr below
#include <sqlite3-binding.h>
#include <sqlite3ext.h>
#include <curl/curl.h>
#include "picojson.h"
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif
SQLITE_EXTENSION_INIT1;
typedef struct {
char* data; // response data from server
size_t size; // response size of data
} MEMFILE;
MEMFILE*
memfopen() {
MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE));
if (mf) {
mf->data = NULL;
mf->size = 0;
}
return mf;
}
void
memfclose(MEMFILE* mf) {
if (mf->data) free(mf->data);
free(mf);
}
size_t
memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) {
MEMFILE* mf = (MEMFILE*) stream;
int block = size * nmemb;
if (!mf) return block; // through
if (!mf->data)
mf->data = (char*) malloc(block);
else
mf->data = (char*) realloc(mf->data, mf->size + block);
if (mf->data) {
memcpy(mf->data + mf->size, ptr, block);
mf->size += block;
}
return block;
}
char*
memfstrdup(MEMFILE* mf) {
char* buf;
if (mf->size == 0) return NULL;
buf = (char*) malloc(mf->size + 1);
memcpy(buf, mf->data, mf->size);
buf[mf->size] = 0;
return buf;
}
static int
my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) {
std::stringstream ss;
ss << "CREATE TABLE " << argv[0]
<< "(id int, full_name text, description text, html_url text)";
int rc = sqlite3_declare_vtab(db, ss.str().c_str());
*ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab));
memset(*ppVTab, 0, sizeof(sqlite3_vtab));
return rc;
}
static int
my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) {
return my_connect(db, pAux, argc, argv, ppVTab, c);
}
static int my_disconnect(sqlite3_vtab *pVTab) {
sqlite3_free(pVTab);
return SQLITE_OK;
}
static int
my_destroy(sqlite3_vtab *pVTab) {
sqlite3_free(pVTab);
return SQLITE_OK;
}
typedef struct {
sqlite3_vtab_cursor base;
int index;
picojson::value* rows;
} cursor;
static int
my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) {
MEMFILE* mf;
CURL* curl;
char* json;
CURLcode res = CURLE_OK;
char error[CURL_ERROR_SIZE] = {0};
char* cert_file = getenv("SSL_CERT_FILE");
mf = memfopen();
curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1);
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2);
curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0");
curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories");
if (cert_file)
curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
if (res != CURLE_OK) {
std::cerr << error << std::endl;
return SQLITE_FAIL;
}
picojson::value* v = new picojson::value;
std::string err;
picojson::parse(*v, mf->data, mf->data + mf->size, &err);
memfclose(mf);
if (!err.empty()) {
delete v;
std::cerr << err << std::endl;
return SQLITE_FAIL;
}
cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor));
c->rows = v;
c->index = 0;
*ppCursor = &c->base;
return SQLITE_OK;
}
static int
my_close(cursor *c) {
delete c->rows;
sqlite3_free(c);
return SQLITE_OK;
}
static int
my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) {
c->index = 0;
return SQLITE_OK;
}
static int
my_next(cursor *c) {
c->index++;
return SQLITE_OK;
}
static int
my_eof(cursor *c) {
return c->index >= c->rows->get<picojson::array>().size() ? 1 : 0;
}
static int
my_column(cursor *c, sqlite3_context *ctxt, int i) {
picojson::value v = c->rows->get<picojson::array>()[c->index];
picojson::object row = v.get<picojson::object>();
const char* p = NULL;
switch (i) {
case 0:
p = row["id"].to_str().c_str();
break;
case 1:
p = row["full_name"].to_str().c_str();
break;
case 2:
p = row["description"].to_str().c_str();
break;
case 3:
p = row["html_url"].to_str().c_str();
break;
}
sqlite3_result_text(ctxt, strdup(p), strlen(p), free);
return SQLITE_OK;
}
static int
my_rowid(cursor *c, sqlite3_int64 *pRowid) {
*pRowid = c->index;
return SQLITE_OK;
}
static int
my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) {
return SQLITE_OK;
}
static const sqlite3_module module = {
0,
my_create,
my_connect,
my_bestindex,
my_disconnect,
my_destroy,
my_open,
(int (*)(sqlite3_vtab_cursor *)) my_close,
(int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter,
(int (*)(sqlite3_vtab_cursor *)) my_next,
(int (*)(sqlite3_vtab_cursor *)) my_eof,
(int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column,
(int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid,
NULL, // my_update
NULL, // my_begin
NULL, // my_sync
NULL, // my_commit
NULL, // my_rollback
NULL, // my_findfunction
NULL, // my_rename
};
static void
destructor(void *arg) {
return;
}
extern "C" {
EXPORT int
sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
sqlite3_create_module_v2(db, "github", &module, NULL, destructor);
return 0;
}
}

View File

@ -0,0 +1,106 @@
package main
import (
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3"
"log"
"os"
)
func main() {
os.Remove("./foo.db")
db, err := sql.Open("sqlite3", "./foo.db")
if err != nil {
log.Fatal(err)
}
defer db.Close()
sqlStmt := `
create table foo (id integer not null primary key, name text);
delete from foo;
`
_, err = db.Exec(sqlStmt)
if err != nil {
log.Printf("%q: %s\n", err, sqlStmt)
return
}
tx, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
for i := 0; i < 100; i++ {
_, err = stmt.Exec(i, fmt.Sprintf("こんにちわ世界%03d", i))
if err != nil {
log.Fatal(err)
}
}
tx.Commit()
rows, err := db.Query("select id, name from foo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id int
var name string
err = rows.Scan(&id, &name)
if err != nil {
log.Fatal(err)
}
fmt.Println(id, name)
}
err = rows.Err()
if err != nil {
log.Fatal(err)
}
stmt, err = db.Prepare("select name from foo where id = ?")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
var name string
err = stmt.QueryRow("3").Scan(&name)
if err != nil {
log.Fatal(err)
}
fmt.Println(name)
_, err = db.Exec("delete from foo")
if err != nil {
log.Fatal(err)
}
_, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')")
if err != nil {
log.Fatal(err)
}
rows, err = db.Query("select id, name from foo")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
var id int
var name string
err = rows.Scan(&id, &name)
if err != nil {
log.Fatal(err)
}
fmt.Println(id, name)
}
err = rows.Err()
if err != nil {
log.Fatal(err)
}
}

74
vendor/github.com/mattn/go-sqlite3/backup.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
*/
import "C"
import (
"runtime"
"unsafe"
)
type SQLiteBackup struct {
b *C.sqlite3_backup
}
func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
destptr := C.CString(dest)
defer C.free(unsafe.Pointer(destptr))
srcptr := C.CString(src)
defer C.free(unsafe.Pointer(srcptr))
if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil {
bb := &SQLiteBackup{b: b}
runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
return bb, nil
}
return nil, c.lastError()
}
// Step copies up to p pages (all remaining pages if p is negative) by calling
// the underlying `sqlite3_backup_step` function. It returns a boolean
// indicating whether the backup is done, and an error for any failure. Done is
// reported when the underlying C function returns SQLITE_DONE (code 101).
func (b *SQLiteBackup) Step(p int) (bool, error) {
ret := C.sqlite3_backup_step(b.b, C.int(p))
if ret == C.SQLITE_DONE {
return true, nil
} else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY {
return false, Error{Code: ErrNo(ret)}
}
return false, nil
}
func (b *SQLiteBackup) Remaining() int {
return int(C.sqlite3_backup_remaining(b.b))
}
func (b *SQLiteBackup) PageCount() int {
return int(C.sqlite3_backup_pagecount(b.b))
}
func (b *SQLiteBackup) Finish() error {
return b.Close()
}
func (b *SQLiteBackup) Close() error {
ret := C.sqlite3_backup_finish(b.b)
if ret != 0 {
return Error{Code: ErrNo(ret)}
}
b.b = nil
runtime.SetFinalizer(b, nil)
return nil
}
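Where the hook example earlier copies everything with a single Step(-1), Step can also be driven in fixed-size increments, checking the done flag described in the comment above. A minimal sketch, assuming bk was obtained from destConn.Backup("main", srcConn, "main") as in that example; the package name, helper name, and five-page step size are arbitrary:

    package backuputil // hypothetical helper package

    import (
        "log"

        sqlite3 "github.com/mattn/go-sqlite3"
    )

    // copyInSteps advances the backup five pages at a time until Step reports
    // that the underlying sqlite3_backup_step returned SQLITE_DONE.
    func copyInSteps(bk *sqlite3.SQLiteBackup) error {
        for {
            done, err := bk.Step(5)
            if err != nil {
                return err
            }
            if done {
                break
            }
            log.Printf("backup: %d of %d pages remaining", bk.Remaining(), bk.PageCount())
        }
        return bk.Finish()
    }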

336
vendor/github.com/mattn/go-sqlite3/callback.go generated vendored Normal file
View File

@ -0,0 +1,336 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
// You can't export a Go function to C and have definitions in the C
// preamble in the same file, so we have to have callbackTrampoline in
// its own file. Because we need a separate file anyway, the support
// code for SQLite custom functions is in here.
/*
#include <sqlite3-binding.h>
#include <stdlib.h>
void _sqlite3_result_text(sqlite3_context* ctx, const char* s);
void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l);
*/
import "C"
import (
"errors"
"fmt"
"math"
"reflect"
"sync"
"unsafe"
)
//export callbackTrampoline
func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo)
fi.Call(ctx, args)
}
//export stepTrampoline
func stepTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo)
ai.Step(ctx, args)
}
//export doneTrampoline
func doneTrampoline(ctx *C.sqlite3_context) {
handle := uintptr(C.sqlite3_user_data(ctx))
ai := lookupHandle(handle).(*aggInfo)
ai.Done(ctx)
}
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
db *SQLiteConn
val interface{}
}
var handleLock sync.Mutex
var handleVals = make(map[uintptr]handleVal)
var handleIndex uintptr = 100
func newHandle(db *SQLiteConn, v interface{}) uintptr {
handleLock.Lock()
defer handleLock.Unlock()
i := handleIndex
handleIndex++
handleVals[i] = handleVal{db, v}
return i
}
func lookupHandle(handle uintptr) interface{} {
handleLock.Lock()
defer handleLock.Unlock()
r, ok := handleVals[handle]
if !ok {
if handle >= 100 && handle < handleIndex {
panic("deleted handle")
} else {
panic("invalid handle")
}
}
return r.val
}
func deleteHandles(db *SQLiteConn) {
handleLock.Lock()
defer handleLock.Unlock()
for handle, val := range handleVals {
if val.db == db {
delete(handleVals, handle)
}
}
}
// This is only here so that tests can refer to it.
type callbackArgRaw C.sqlite3_value
type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error)
type callbackArgCast struct {
f callbackArgConverter
typ reflect.Type
}
func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) {
val, err := c.f(v)
if err != nil {
return reflect.Value{}, err
}
if !val.Type().ConvertibleTo(c.typ) {
return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ)
}
return val.Convert(c.typ), nil
}
func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil
}
func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
i := int64(C.sqlite3_value_int64(v))
val := false
if i != 0 {
val = true
}
return reflect.ValueOf(val), nil
}
func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_FLOAT {
return reflect.Value{}, fmt.Errorf("argument must be a FLOAT")
}
return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil
}
func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := C.sqlite3_value_blob(v)
return reflect.ValueOf(C.GoBytes(p, l)), nil
case C.SQLITE_TEXT:
l := C.sqlite3_value_bytes(v)
c := unsafe.Pointer(C.sqlite3_value_text(v))
return reflect.ValueOf(C.GoBytes(c, l)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := (*C.char)(C.sqlite3_value_blob(v))
return reflect.ValueOf(C.GoStringN(p, l)), nil
case C.SQLITE_TEXT:
c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v)))
return reflect.ValueOf(C.GoString(c)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_INTEGER:
return callbackArgInt64(v)
case C.SQLITE_FLOAT:
return callbackArgFloat64(v)
case C.SQLITE_TEXT:
return callbackArgString(v)
case C.SQLITE_BLOB:
return callbackArgBytes(v)
case C.SQLITE_NULL:
// Interpret NULL as a nil byte slice.
var ret []byte
return reflect.ValueOf(ret), nil
default:
panic("unreachable")
}
}
func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
switch typ.Kind() {
case reflect.Interface:
if typ.NumMethod() != 0 {
return nil, errors.New("the only supported interface type is interface{}")
}
return callbackArgGeneric, nil
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackArgBytes, nil
case reflect.String:
return callbackArgString, nil
case reflect.Bool:
return callbackArgBool, nil
case reflect.Int64:
return callbackArgInt64, nil
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
c := callbackArgCast{callbackArgInt64, typ}
return c.Run, nil
case reflect.Float64:
return callbackArgFloat64, nil
case reflect.Float32:
c := callbackArgCast{callbackArgFloat64, typ}
return c.Run, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) {
var args []reflect.Value
if len(argv) < len(converters) {
return nil, fmt.Errorf("function requires at least %d arguments", len(converters))
}
for i, arg := range argv[:len(converters)] {
v, err := converters[i](arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
if variadic != nil {
for _, arg := range argv[len(converters):] {
v, err := variadic(arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
}
return args, nil
}
type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error
func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Int64:
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
v = v.Convert(reflect.TypeOf(int64(0)))
case reflect.Bool:
b := v.Interface().(bool)
if b {
v = reflect.ValueOf(int64(1))
} else {
v = reflect.ValueOf(int64(0))
}
default:
return fmt.Errorf("cannot convert %s to INTEGER", v.Type())
}
C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64)))
return nil
}
func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Float64:
case reflect.Float32:
v = v.Convert(reflect.TypeOf(float64(0)))
default:
return fmt.Errorf("cannot convert %s to FLOAT", v.Type())
}
C.sqlite3_result_double(ctx, C.double(v.Interface().(float64)))
return nil
}
func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
return fmt.Errorf("cannot convert %s to BLOB", v.Type())
}
i := v.Interface()
if i == nil || len(i.([]byte)) == 0 {
C.sqlite3_result_null(ctx)
} else {
bs := i.([]byte)
C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs)))
}
return nil
}
func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.String {
return fmt.Errorf("cannot convert %s to TEXT", v.Type())
}
C._sqlite3_result_text(ctx, C.CString(v.Interface().(string)))
return nil
}
func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
switch typ.Kind() {
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackRetBlob, nil
case reflect.String:
return callbackRetText, nil
case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
return callbackRetInteger, nil
case reflect.Float32, reflect.Float64:
return callbackRetFloat, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackError(ctx *C.sqlite3_context, err error) {
cstr := C.CString(err.Error())
defer C.free(unsafe.Pointer(cstr))
C.sqlite3_result_error(ctx, cstr, -1)
}
// Test support code. Tests are not allowed to import "C", so we can't
// declare any functions that use C.sqlite3_value.
func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter {
return func(*C.sqlite3_value) (reflect.Value, error) {
return v, err
}
}

97
vendor/github.com/mattn/go-sqlite3/callback_test.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
package sqlite3
import (
"errors"
"math"
"reflect"
"testing"
)
func TestCallbackArgCast(t *testing.T) {
intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil)
floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil)
errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test"))
tests := []struct {
f callbackArgConverter
o reflect.Value
}{
{intConv, reflect.ValueOf(int8(-1))},
{intConv, reflect.ValueOf(int16(-1))},
{intConv, reflect.ValueOf(int32(-1))},
{intConv, reflect.ValueOf(uint8(math.MaxUint8))},
{intConv, reflect.ValueOf(uint16(math.MaxUint16))},
{intConv, reflect.ValueOf(uint32(math.MaxUint32))},
// Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1
{intConv, reflect.ValueOf(uint64(math.MaxInt64))},
{floatConv, reflect.ValueOf(float32(math.Inf(1)))},
}
for _, test := range tests {
conv := callbackArgCast{test.f, test.o.Type()}
val, err := conv.Run(nil)
if err != nil {
t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err)
} else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) {
t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface())
}
}
conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))}
_, err := conv.Run(nil)
if err == nil {
t.Errorf("Expected error during callbackArgCast, but got none")
}
}
func TestCallbackConverters(t *testing.T) {
tests := []struct {
v interface{}
err bool
}{
// Unfortunately, we can't tell which converter was returned,
// but we can at least check which types can be converted.
{[]byte{0}, false},
{"text", false},
{true, false},
{int8(0), false},
{int16(0), false},
{int32(0), false},
{int64(0), false},
{uint8(0), false},
{uint16(0), false},
{uint32(0), false},
{uint64(0), false},
{int(0), false},
{uint(0), false},
{float64(0), false},
{float32(0), false},
{func() {}, true},
{complex64(complex(0, 0)), true},
{complex128(complex(0, 0)), true},
{struct{}{}, true},
{map[string]string{}, true},
{[]string{}, true},
{(*int8)(nil), true},
{make(chan int), true},
}
for _, test := range tests {
_, err := callbackArg(reflect.TypeOf(test.v))
if test.err && err == nil {
t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
} else if !test.err && err != nil {
t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
}
}
for _, test := range tests {
_, err := callbackRet(reflect.TypeOf(test.v))
if test.err && err == nil {
t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
} else if !test.err && err != nil {
t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
}
}
}

112
vendor/github.com/mattn/go-sqlite3/doc.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
/*
Package sqlite3 provides an interface to SQLite3 databases.
This works as a driver for database/sql.
Installation
go get github.com/mattn/go-sqlite3
Supported Types
Currently, go-sqlite3 supports the following data types.
+------------------------------+
|go        | sqlite3           |
|----------|-------------------|
|nil       | null              |
|int       | integer           |
|int64     | integer           |
|float64   | float             |
|bool      | integer           |
|[]byte    | blob              |
|string    | text              |
|time.Time | timestamp/datetime|
+------------------------------+
SQLite3 Extension
You can write your own extension module for sqlite3. For example, below is an extension that implements a regexp matcher.
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg,
const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
(void*)db, regexp_func, NULL, NULL);
}
It needs to be built as a shared library (.so/.dll), and you need to register
the extension module as shown below.
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
Then, you can use this extension.
rows, err := db.Query("select text from mytable where name regexp '^golang'")
Connection Hook
You can hook in and inject your own code when a connection is established. database/sql
does not expose the native go-sqlite3 interfaces, so if you need them,
set ConnectHook and capture the SQLiteConn.
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
return nil
},
})
Go SQLite3 Extensions
If you want to register Go functions as SQLite extension functions,
call RegisterFunc from ConnectHook.
regex = func(re, s string) (bool, error) {
return regexp.MatchString(re, s)
}
sql.Register("sqlite3_with_go_func",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
return conn.RegisterFunc("regexp", regex, true)
},
})
See the documentation of RegisterFunc for more details.
*/
package sqlite3
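A minimal sketch of the type mapping listed above, binding a time.Time and a []byte and scanning them back; the blobs table is made up for illustration:

    package main

    import (
        "database/sql"
        "log"
        "time"

        _ "github.com/mattn/go-sqlite3"
    )

    func main() {
        db, err := sql.Open("sqlite3", ":memory:")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if _, err := db.Exec("create table blobs (created timestamp, payload blob)"); err != nil {
            log.Fatal(err)
        }
        // time.Time binds to timestamp/datetime, []byte binds to blob.
        if _, err := db.Exec("insert into blobs values (?, ?)", time.Now(), []byte{0x01, 0x02}); err != nil {
            log.Fatal(err)
        }

        var created time.Time
        var payload []byte
        if err := db.QueryRow("select created, payload from blobs").Scan(&created, &payload); err != nil {
            log.Fatal(err)
        }
        log.Println(created, payload)
    }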

128
vendor/github.com/mattn/go-sqlite3/error.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import "C"
type ErrNo int
const ErrNoMask C.int = 0xff
type ErrNoExtended int
type Error struct {
Code ErrNo /* The error code returned by SQLite */
ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
err string /* The error string returned by sqlite3_errmsg(),
this usually contains more specific details. */
}
// result codes from http://www.sqlite.org/c3ref/c_abort.html
var (
ErrError = ErrNo(1) /* SQL error or missing database */
ErrInternal = ErrNo(2) /* Internal logic error in SQLite */
ErrPerm = ErrNo(3) /* Access permission denied */
ErrAbort = ErrNo(4) /* Callback routine requested an abort */
ErrBusy = ErrNo(5) /* The database file is locked */
ErrLocked = ErrNo(6) /* A table in the database is locked */
ErrNomem = ErrNo(7) /* A malloc() failed */
ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */
ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */
ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */
ErrCorrupt = ErrNo(11) /* The database disk image is malformed */
ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */
ErrFull = ErrNo(13) /* Insertion failed because database is full */
ErrCantOpen = ErrNo(14) /* Unable to open the database file */
ErrProtocol = ErrNo(15) /* Database lock protocol error */
ErrEmpty = ErrNo(16) /* Database is empty */
ErrSchema = ErrNo(17) /* The database schema changed */
ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */
ErrConstraint = ErrNo(19) /* Abort due to constraint violation */
ErrMismatch = ErrNo(20) /* Data type mismatch */
ErrMisuse = ErrNo(21) /* Library used incorrectly */
ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */
ErrAuth = ErrNo(23) /* Authorization denied */
ErrFormat = ErrNo(24) /* Auxiliary database format error */
ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */
ErrNotADB = ErrNo(26) /* File opened that is not a database file */
ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */
ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */
)
func (err ErrNo) Error() string {
return Error{Code: err}.Error()
}
func (err ErrNo) Extend(by int) ErrNoExtended {
return ErrNoExtended(int(err) | (by << 8))
}
func (err ErrNoExtended) Error() string {
return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}
func (err Error) Error() string {
if err.err != "" {
return err.err
}
return errorString(err)
}
// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
var (
ErrIoErrRead = ErrIoErr.Extend(1)
ErrIoErrShortRead = ErrIoErr.Extend(2)
ErrIoErrWrite = ErrIoErr.Extend(3)
ErrIoErrFsync = ErrIoErr.Extend(4)
ErrIoErrDirFsync = ErrIoErr.Extend(5)
ErrIoErrTruncate = ErrIoErr.Extend(6)
ErrIoErrFstat = ErrIoErr.Extend(7)
ErrIoErrUnlock = ErrIoErr.Extend(8)
ErrIoErrRDlock = ErrIoErr.Extend(9)
ErrIoErrDelete = ErrIoErr.Extend(10)
ErrIoErrBlocked = ErrIoErr.Extend(11)
ErrIoErrNoMem = ErrIoErr.Extend(12)
ErrIoErrAccess = ErrIoErr.Extend(13)
ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
ErrIoErrLock = ErrIoErr.Extend(15)
ErrIoErrClose = ErrIoErr.Extend(16)
ErrIoErrDirClose = ErrIoErr.Extend(17)
ErrIoErrSHMOpen = ErrIoErr.Extend(18)
ErrIoErrSHMSize = ErrIoErr.Extend(19)
ErrIoErrSHMLock = ErrIoErr.Extend(20)
ErrIoErrSHMMap = ErrIoErr.Extend(21)
ErrIoErrSeek = ErrIoErr.Extend(22)
ErrIoErrDeleteNoent = ErrIoErr.Extend(23)
ErrIoErrMMap = ErrIoErr.Extend(24)
ErrIoErrGetTempPath = ErrIoErr.Extend(25)
ErrIoErrConvPath = ErrIoErr.Extend(26)
ErrLockedSharedCache = ErrLocked.Extend(1)
ErrBusyRecovery = ErrBusy.Extend(1)
ErrBusySnapshot = ErrBusy.Extend(2)
ErrCantOpenNoTempDir = ErrCantOpen.Extend(1)
ErrCantOpenIsDir = ErrCantOpen.Extend(2)
ErrCantOpenFullPath = ErrCantOpen.Extend(3)
ErrCantOpenConvPath = ErrCantOpen.Extend(4)
ErrCorruptVTab = ErrCorrupt.Extend(1)
ErrReadonlyRecovery = ErrReadonly.Extend(1)
ErrReadonlyCantLock = ErrReadonly.Extend(2)
ErrReadonlyRollback = ErrReadonly.Extend(3)
ErrReadonlyDbMoved = ErrReadonly.Extend(4)
ErrAbortRollback = ErrAbort.Extend(2)
ErrConstraintCheck = ErrConstraint.Extend(1)
ErrConstraintCommitHook = ErrConstraint.Extend(2)
ErrConstraintForeignKey = ErrConstraint.Extend(3)
ErrConstraintFunction = ErrConstraint.Extend(4)
ErrConstraintNotNull = ErrConstraint.Extend(5)
ErrConstraintPrimaryKey = ErrConstraint.Extend(6)
ErrConstraintTrigger = ErrConstraint.Extend(7)
ErrConstraintUnique = ErrConstraint.Extend(8)
ErrConstraintVTab = ErrConstraint.Extend(9)
ErrConstraintRowId = ErrConstraint.Extend(10)
ErrNoticeRecoverWAL = ErrNotice.Extend(1)
ErrNoticeRecoverRollback = ErrNotice.Extend(2)
ErrWarningAutoIndex = ErrWarning.Extend(1)
)
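These codes are typically consumed by type-asserting the error that comes back through database/sql, as the tests in error_test.go below do. A minimal sketch; the package and helper names are arbitrary:

    package dbutil // hypothetical helper package

    import sqlite3 "github.com/mattn/go-sqlite3"

    // isUniqueViolation reports whether err is a UNIQUE constraint failure
    // raised by this driver.
    func isUniqueViolation(err error) bool {
        serr, ok := err.(sqlite3.Error)
        if !ok {
            return false
        }
        return serr.Code == sqlite3.ErrConstraint &&
            serr.ExtendedCode == sqlite3.ErrConstraintUnique
    }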

242
vendor/github.com/mattn/go-sqlite3/error_test.go generated vendored Normal file
View File

@ -0,0 +1,242 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"io/ioutil"
"os"
"path"
"testing"
)
func TestSimpleError(t *testing.T) {
e := ErrError.Error()
if e != "SQL logic error or missing database" {
t.Error("wrong error code:" + e)
}
}
func TestCorruptDbErrors(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
f, err := os.Create(dbFileName)
if err != nil {
t.Error(err)
}
f.Write([]byte{1, 2, 3, 4, 5})
f.Close()
db, err := sql.Open("sqlite3", dbFileName)
if err == nil {
_, err = db.Exec("drop table foo")
}
sqliteErr := err.(Error)
if sqliteErr.Code != ErrNotADB {
t.Error("wrong error code for corrupted DB")
}
if err.Error() == "" {
t.Error("wrong error string for corrupted DB")
}
db.Close()
}
func TestSqlLogicErrors(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
if err != nil {
t.Error(err)
}
const expectedErr = "table Foo already exists"
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
if err.Error() != expectedErr {
t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr)
}
}
func TestExtendedErrorCodes_ForeignKey(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);")
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintForeignKey {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintForeignKey)
}
}
}
func TestExtendedErrorCodes_NotNull(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
if err != nil {
t.Fatalf("Creating first row: %v", err)
}
id, err := res.LastInsertId()
if err != nil {
t.Fatalf("Retrieving last insert id: %v", err)
}
_, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id)
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintNotNull {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintNotNull)
}
}
}
func TestExtendedErrorCodes_Unique(t *testing.T) {
dirName, err := ioutil.TempDir("", "sqlite3-err")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dirName)
dbFileName := path.Join(dirName, "test.db")
db, err := sql.Open("sqlite3", dbFileName)
if err != nil {
t.Error(err)
}
defer db.Close()
_, err = db.Exec("PRAGMA foreign_keys=ON;")
if err != nil {
t.Errorf("PRAGMA foreign_keys=ON: %v", err)
}
_, err = db.Exec(`CREATE TABLE Foo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
value INTEGER NOT NULL,
ref INTEGER NULL REFERENCES Foo (id),
UNIQUE(value)
);`)
if err != nil {
t.Error(err)
}
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
if err != nil {
t.Fatalf("Creating first row: %v", err)
}
id, err := res.LastInsertId()
if err != nil {
t.Fatalf("Retrieving last insert id: %v", err)
}
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id)
if err == nil {
t.Error("No error!")
} else {
sqliteErr := err.(Error)
if sqliteErr.Code != ErrConstraint {
t.Errorf("Wrong basic error code: %d != %d",
sqliteErr.Code, ErrConstraint)
}
if sqliteErr.ExtendedCode != ErrConstraintUnique {
t.Errorf("Wrong extended error code: %d != %d",
sqliteErr.ExtendedCode, ErrConstraintUnique)
}
extended := sqliteErr.Code.Extend(3).Error()
expected := "constraint failed"
if extended != expected {
t.Errorf("Wrong basic error code: %q != %q",
extended, expected)
}
}
}

189319
vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c generated vendored Normal file

File diff suppressed because it is too large

8733
vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h generated vendored Normal file

File diff suppressed because it is too large

1006
vendor/github.com/mattn/go-sqlite3/sqlite3.go generated vendored Normal file

File diff suppressed because it is too large

130
vendor/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
// Copyright (C) 2015 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"database/sql"
"os"
"testing"
)
func TestFTS3(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("DROP TABLE foo")
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts3(id INTEGER PRIMARY KEY, value TEXT)")
if err != nil {
t.Fatal("Failed to create table:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `今日の 晩御飯は 天麩羅よ`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 2, `今日は いい 天気だ`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
rows, err := db.Query("SELECT id, value FROM foo WHERE value MATCH '今日* 天*'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
for rows.Next() {
var id int
var value string
if err := rows.Scan(&id, &value); err != nil {
t.Error("Unable to scan results:", err)
continue
}
if id == 1 && value != `今日の 晩御飯は 天麩羅よ` {
t.Error("Value for id 1 should be `今日の 晩御飯は 天麩羅よ`, but:", value)
} else if id == 2 && value != `今日は いい 天気だ` {
t.Error("Value for id 2 should be `今日は いい 天気だ`, but:", value)
}
}
rows, err = db.Query("SELECT value FROM foo WHERE value MATCH '今日* 天麩羅*'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
var value string
if !rows.Next() {
t.Fatal("Result should be only one")
}
if err := rows.Scan(&value); err != nil {
t.Fatal("Unable to scan results:", err)
}
if value != `今日の 晩御飯は 天麩羅よ` {
t.Fatal("Value should be `今日の 晩御飯は 天麩羅よ`, but:", value)
}
if rows.Next() {
t.Fatal("Result should be only one")
}
}
func TestFTS4(t *testing.T) {
tempFilename := TempFilename(t)
defer os.Remove(tempFilename)
db, err := sql.Open("sqlite3", tempFilename)
if err != nil {
t.Fatal("Failed to open database:", err)
}
defer db.Close()
_, err = db.Exec("DROP TABLE foo")
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts4(tokenize=unicode61, id INTEGER PRIMARY KEY, value TEXT)")
switch {
case err != nil && err.Error() == "unknown tokenizer: unicode61":
t.Skip("FTS4 not supported")
case err != nil:
t.Fatal("Failed to create table:", err)
}
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `février`)
if err != nil {
t.Fatal("Failed to insert value:", err)
}
rows, err := db.Query("SELECT value FROM foo WHERE value MATCH 'fevrier'")
if err != nil {
t.Fatal("Unable to query foo table:", err)
}
defer rows.Close()
var value string
if !rows.Next() {
t.Fatal("Result should be only one")
}
if err := rows.Scan(&value); err != nil {
t.Fatal("Unable to scan results:", err)
}
if value != `février` {
t.Fatal("Value should be `février`, but:", value)
}
if rows.Next() {
t.Fatal("Result should be only one")
}
}

13
vendor/github.com/mattn/go-sqlite3/sqlite3_fts5.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build fts5
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_FTS5
#cgo LDFLAGS: -lm
*/
import "C"

13
vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build icu
package sqlite3
/*
#cgo LDFLAGS: -licuuc -licui18n
#cgo CFLAGS: -DSQLITE_ENABLE_ICU
*/
import "C"

12
vendor/github.com/mattn/go-sqlite3/sqlite3_json1.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build json1
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_JSON1
*/
import "C"


@ -0,0 +1,14 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build libsqlite3
package sqlite3
/*
#cgo CFLAGS: -DUSE_LIBSQLITE3
#cgo linux LDFLAGS: -lsqlite3
#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
*/
import "C"


@ -0,0 +1,63 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !sqlite_omit_load_extension
package sqlite3
/*
#include <sqlite3-binding.h>
#include <stdlib.h>
*/
import "C"
import (
"errors"
"unsafe"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
for _, extension := range extensions {
cext := C.CString(extension)
defer C.free(unsafe.Pointer(cext))
rv = C.sqlite3_load_extension(c.db, cext, nil, nil)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
clib := C.CString(lib)
defer C.free(unsafe.Pointer(clib))
centry := C.CString(entry)
defer C.free(unsafe.Pointer(centry))
rv = C.sqlite3_load_extension(c.db, clib, centry, nil)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
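
The file above exposes two paths for loading SQLite run-time extensions: the unexported loadExtensions, which the driver calls with a configured list, and the exported LoadExtension method on SQLiteConn. Below is a hedged sketch of the list-based path; it assumes the driver type registered by go-sqlite3 is sqlite3.SQLiteDriver with an Extensions field (that field is an assumption here, and the shared-library path is a placeholder):

package main

import (
    "database/sql"
    "log"

    "github.com/mattn/go-sqlite3"
)

func main() {
    // Assumed API: SQLiteDriver.Extensions feeds loadExtensions above on each new connection.
    // "./libsqlitefunctions" is a placeholder for whatever loadable extension you built.
    sql.Register("sqlite3_with_extensions", &sqlite3.SQLiteDriver{
        Extensions: []string{"./libsqlitefunctions"},
    })

    db, err := sql.Open("sqlite3_with_extensions", ":memory:")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Statements on db can now call functions provided by the loaded extension.
    if err := db.Ping(); err != nil {
        log.Fatal(err)
    }
}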


@ -0,0 +1,23 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build sqlite_omit_load_extension
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_OMIT_LOAD_EXTENSION
*/
import "C"
import (
"errors"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
return errors.New("Extensions have been disabled for static builds")
}
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
return errors.New("Extensions have been disabled for static builds")
}

13
vendor/github.com/mattn/go-sqlite3/sqlite3_other.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows
package sqlite3
/*
#cgo CFLAGS: -I.
#cgo linux LDFLAGS: -ldl
*/
import "C"

1350
vendor/github.com/mattn/go-sqlite3/sqlite3_test.go generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,409 @@
package sqlite3_test
import (
"database/sql"
"fmt"
"math/rand"
"regexp"
"strconv"
"sync"
"testing"
"time"
)
type Dialect int
const (
SQLITE Dialect = iota
POSTGRESQL
MYSQL
)
type DB struct {
*testing.T
*sql.DB
dialect Dialect
once sync.Once
}
var db *DB
// the following tables will be created and dropped during the test
var testTables = []string{"foo", "bar", "t", "bench"}
var tests = []testing.InternalTest{
{"TestBlobs", TestBlobs},
{"TestManyQueryRow", TestManyQueryRow},
{"TestTxQuery", TestTxQuery},
{"TestPreparedStmt", TestPreparedStmt},
}
var benchmarks = []testing.InternalBenchmark{
{"BenchmarkExec", BenchmarkExec},
{"BenchmarkQuery", BenchmarkQuery},
{"BenchmarkParams", BenchmarkParams},
{"BenchmarkStmt", BenchmarkStmt},
{"BenchmarkRows", BenchmarkRows},
{"BenchmarkStmtRows", BenchmarkStmtRows},
}
// RunTests runs the SQL test suite
func RunTests(t *testing.T, d *sql.DB, dialect Dialect) {
db = &DB{t, d, dialect, sync.Once{}}
testing.RunTests(func(string, string) (bool, error) { return true, nil }, tests)
if !testing.Short() {
for _, b := range benchmarks {
fmt.Printf("%-20s", b.Name)
r := testing.Benchmark(b.F)
fmt.Printf("%10d %10.0f req/s\n", r.N, float64(r.N)/r.T.Seconds())
}
}
db.tearDown()
}
func (db *DB) mustExec(sql string, args ...interface{}) sql.Result {
res, err := db.Exec(sql, args...)
if err != nil {
db.Fatalf("Error running %q: %v", sql, err)
}
return res
}
func (db *DB) tearDown() {
for _, tbl := range testTables {
switch db.dialect {
case SQLITE:
db.mustExec("drop table if exists " + tbl)
case MYSQL, POSTGRESQL:
db.mustExec("drop table if exists " + tbl)
default:
db.Fatal("unkown dialect")
}
}
}
// q replaces ? parameters if needed
func (db *DB) q(sql string) string {
switch db.dialect {
case POSTGRESQL: // replace with $1, $2, ...
qrx := regexp.MustCompile(`\?`)
n := 0
return qrx.ReplaceAllStringFunc(sql, func(string) string {
n++
return "$" + strconv.Itoa(n)
})
}
return sql
}
func (db *DB) blobType(size int) string {
switch db.dialect {
case SQLITE:
return fmt.Sprintf("blob[%d]", size)
case POSTGRESQL:
return "bytea"
case MYSQL:
return fmt.Sprintf("VARBINARY(%d)", size)
}
panic("unkown dialect")
}
func (db *DB) serialPK() string {
switch db.dialect {
case SQLITE:
return "integer primary key autoincrement"
case POSTGRESQL:
return "serial primary key"
case MYSQL:
return "integer primary key auto_increment"
}
panic("unkown dialect")
}
func (db *DB) now() string {
switch db.dialect {
case SQLITE:
return "datetime('now')"
case POSTGRESQL:
return "now()"
case MYSQL:
return "now()"
}
panic("unkown dialect")
}
func makeBench() {
if _, err := db.Exec("create table bench (n varchar(32), i integer, d double, s varchar(32), t datetime)"); err != nil {
panic(err)
}
st, err := db.Prepare("insert into bench values (?, ?, ?, ?, ?)")
if err != nil {
panic(err)
}
defer st.Close()
for i := 0; i < 100; i++ {
if _, err = st.Exec(nil, i, float64(i), fmt.Sprintf("%d", i), time.Now()); err != nil {
panic(err)
}
}
}
func TestResult(t *testing.T) {
db.tearDown()
db.mustExec("create temporary table test (id " + db.serialPK() + ", name varchar(10))")
for i := 1; i < 3; i++ {
r := db.mustExec(db.q("insert into test (name) values (?)"), fmt.Sprintf("row %d", i))
n, err := r.RowsAffected()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("got %v, want %v", n, 1)
}
n, err = r.LastInsertId()
if err != nil {
t.Fatal(err)
}
if n != int64(i) {
t.Errorf("got %v, want %v", n, i)
}
}
if _, err := db.Exec("error!"); err == nil {
t.Fatalf("expected error")
}
}
func TestBlobs(t *testing.T) {
db.tearDown()
var blob = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
db.mustExec("create table foo (id integer primary key, bar " + db.blobType(16) + ")")
db.mustExec(db.q("insert into foo (id, bar) values(?,?)"), 0, blob)
want := fmt.Sprintf("%x", blob)
b := make([]byte, 16)
err := db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&b)
got := fmt.Sprintf("%x", b)
if err != nil {
t.Errorf("[]byte scan: %v", err)
} else if got != want {
t.Errorf("for []byte, got %q; want %q", got, want)
}
err = db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&got)
want = string(blob)
if err != nil {
t.Errorf("string scan: %v", err)
} else if got != want {
t.Errorf("for string, got %q; want %q", got, want)
}
}
func TestManyQueryRow(t *testing.T) {
if testing.Short() {
t.Log("skipping in short mode")
return
}
db.tearDown()
db.mustExec("create table foo (id integer primary key, name varchar(50))")
db.mustExec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob")
var name string
for i := 0; i < 10000; i++ {
err := db.QueryRow(db.q("select name from foo where id = ?"), 1).Scan(&name)
if err != nil || name != "bob" {
t.Fatalf("on query %d: err=%v, name=%q", i, err, name)
}
}
}
func TestTxQuery(t *testing.T) {
db.tearDown()
tx, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer tx.Rollback()
_, err = tx.Exec("create table foo (id integer primary key, name varchar(50))")
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob")
if err != nil {
t.Fatal(err)
}
r, err := tx.Query(db.q("select name from foo where id = ?"), 1)
if err != nil {
t.Fatal(err)
}
defer r.Close()
if !r.Next() {
if r.Err() != nil {
t.Fatal(r.Err())
}
t.Fatal("expected one rows")
}
var name string
err = r.Scan(&name)
if err != nil {
t.Fatal(err)
}
}
func TestPreparedStmt(t *testing.T) {
db.tearDown()
db.mustExec("CREATE TABLE t (count INT)")
sel, err := db.Prepare("SELECT count FROM t ORDER BY count DESC")
if err != nil {
t.Fatalf("prepare 1: %v", err)
}
ins, err := db.Prepare(db.q("INSERT INTO t (count) VALUES (?)"))
if err != nil {
t.Fatalf("prepare 2: %v", err)
}
for n := 1; n <= 3; n++ {
if _, err := ins.Exec(n); err != nil {
t.Fatalf("insert(%d) = %v", n, err)
}
}
const nRuns = 10
var wg sync.WaitGroup
for i := 0; i < nRuns; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 10; j++ {
count := 0
if err := sel.QueryRow().Scan(&count); err != nil && err != sql.ErrNoRows {
t.Errorf("Query: %v", err)
return
}
if _, err := ins.Exec(rand.Intn(100)); err != nil {
t.Errorf("Insert: %v", err)
return
}
}
}()
}
wg.Wait()
}
// Benchmarks need to use panic() since b.Error errors are lost when
// running via testing.Benchmark(). I would like to run these via go
// test -bench, but calling Benchmark() from a benchmark test
// currently hangs go.
func BenchmarkExec(b *testing.B) {
for i := 0; i < b.N; i++ {
if _, err := db.Exec("select 1"); err != nil {
panic(err)
}
}
}
func BenchmarkQuery(b *testing.B) {
for i := 0; i < b.N; i++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := db.QueryRow("select null, 1, 1.1, 'foo'").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
func BenchmarkParams(b *testing.B) {
for i := 0; i < b.N; i++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := db.QueryRow("select ?, ?, ?, ?", nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
func BenchmarkStmt(b *testing.B) {
st, err := db.Prepare("select ?, ?, ?, ?")
if err != nil {
panic(err)
}
defer st.Close()
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
// var t time.Time
if err := st.QueryRow(nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
panic(err)
}
}
}
func BenchmarkRows(b *testing.B) {
db.once.Do(makeBench)
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
var t time.Time
r, err := db.Query("select * from bench")
if err != nil {
panic(err)
}
for r.Next() {
if err = r.Scan(&n, &i, &f, &s, &t); err != nil {
panic(err)
}
}
if err = r.Err(); err != nil {
panic(err)
}
}
}
func BenchmarkStmtRows(b *testing.B) {
db.once.Do(makeBench)
st, err := db.Prepare("select * from bench")
if err != nil {
panic(err)
}
defer st.Close()
for n := 0; n < b.N; n++ {
var n sql.NullString
var i int
var f float64
var s string
var t time.Time
r, err := st.Query()
if err != nil {
panic(err)
}
for r.Next() {
if err = r.Scan(&n, &i, &f, &s, &t); err != nil {
panic(err)
}
}
if err = r.Err(); err != nil {
panic(err)
}
}
}
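
The suite above is driven from a consuming package through RunTests, which runs the cross-dialect tests and, outside -short mode, the benchmarks. A minimal sketch of invoking it against SQLite; the import path for this helper package is an assumption based on its sqlite3_test package name:

package mydb_test

import (
    "database/sql"
    "os"
    "testing"

    _ "github.com/mattn/go-sqlite3"
    sqlitesuite "github.com/mattn/go-sqlite3/sqlite3_test" // assumed import path
)

func TestSQLiteSuite(t *testing.T) {
    const dbfile = "suite_test.db"
    defer os.Remove(dbfile)

    d, err := sql.Open("sqlite3", dbfile)
    if err != nil {
        t.Fatal(err)
    }
    defer d.Close()

    // SQLITE selects the SQLite flavor of the DDL the suite generates.
    sqlitesuite.RunTests(t, d, sqlitesuite.SQLITE)
}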

14
vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go generated vendored Normal file

@ -0,0 +1,14 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build windows
package sqlite3
/*
#cgo CFLAGS: -I. -fno-stack-check -fno-stack-protector -mno-stack-arg-probe
#cgo windows,386 CFLAGS: -D_USE_32BIT_TIME_T
#cgo LDFLAGS: -lmingwex -lmingw32
*/
import "C"

546
vendor/github.com/mattn/go-sqlite3/sqlite3ext.h generated vendored Normal file

@ -0,0 +1,546 @@
/*
** 2006 June 7
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This header file defines the SQLite interface for use by
** shared libraries that want to be imported as extensions into
** an SQLite instance. Shared libraries that intend to be loaded
** as extensions by SQLite should #include this file instead of
** sqlite3.h.
*/
#ifndef _SQLITE3EXT_H_
#define _SQLITE3EXT_H_
#include "sqlite3-binding.h"
typedef struct sqlite3_api_routines sqlite3_api_routines;
/*
** The following structure holds pointers to all of the SQLite API
** routines.
**
** WARNING: In order to maintain backwards compatibility, add new
** interfaces to the end of this structure only. If you insert new
** interfaces in the middle of this structure, then older different
** versions of SQLite will not be able to load each other's shared
** libraries!
*/
struct sqlite3_api_routines {
void * (*aggregate_context)(sqlite3_context*,int nBytes);
int (*aggregate_count)(sqlite3_context*);
int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*));
int (*bind_double)(sqlite3_stmt*,int,double);
int (*bind_int)(sqlite3_stmt*,int,int);
int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64);
int (*bind_null)(sqlite3_stmt*,int);
int (*bind_parameter_count)(sqlite3_stmt*);
int (*bind_parameter_index)(sqlite3_stmt*,const char*zName);
const char * (*bind_parameter_name)(sqlite3_stmt*,int);
int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*));
int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*));
int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*);
int (*busy_handler)(sqlite3*,int(*)(void*,int),void*);
int (*busy_timeout)(sqlite3*,int ms);
int (*changes)(sqlite3*);
int (*close)(sqlite3*);
int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,
int eTextRep,const char*));
int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,
int eTextRep,const void*));
const void * (*column_blob)(sqlite3_stmt*,int iCol);
int (*column_bytes)(sqlite3_stmt*,int iCol);
int (*column_bytes16)(sqlite3_stmt*,int iCol);
int (*column_count)(sqlite3_stmt*pStmt);
const char * (*column_database_name)(sqlite3_stmt*,int);
const void * (*column_database_name16)(sqlite3_stmt*,int);
const char * (*column_decltype)(sqlite3_stmt*,int i);
const void * (*column_decltype16)(sqlite3_stmt*,int);
double (*column_double)(sqlite3_stmt*,int iCol);
int (*column_int)(sqlite3_stmt*,int iCol);
sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol);
const char * (*column_name)(sqlite3_stmt*,int);
const void * (*column_name16)(sqlite3_stmt*,int);
const char * (*column_origin_name)(sqlite3_stmt*,int);
const void * (*column_origin_name16)(sqlite3_stmt*,int);
const char * (*column_table_name)(sqlite3_stmt*,int);
const void * (*column_table_name16)(sqlite3_stmt*,int);
const unsigned char * (*column_text)(sqlite3_stmt*,int iCol);
const void * (*column_text16)(sqlite3_stmt*,int iCol);
int (*column_type)(sqlite3_stmt*,int iCol);
sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol);
void * (*commit_hook)(sqlite3*,int(*)(void*),void*);
int (*complete)(const char*sql);
int (*complete16)(const void*sql);
int (*create_collation)(sqlite3*,const char*,int,void*,
int(*)(void*,int,const void*,int,const void*));
int (*create_collation16)(sqlite3*,const void*,int,void*,
int(*)(void*,int,const void*,int,const void*));
int (*create_function)(sqlite3*,const char*,int,int,void*,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*));
int (*create_function16)(sqlite3*,const void*,int,int,void*,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*));
int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*);
int (*data_count)(sqlite3_stmt*pStmt);
sqlite3 * (*db_handle)(sqlite3_stmt*);
int (*declare_vtab)(sqlite3*,const char*);
int (*enable_shared_cache)(int);
int (*errcode)(sqlite3*db);
const char * (*errmsg)(sqlite3*);
const void * (*errmsg16)(sqlite3*);
int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**);
int (*expired)(sqlite3_stmt*);
int (*finalize)(sqlite3_stmt*pStmt);
void (*free)(void*);
void (*free_table)(char**result);
int (*get_autocommit)(sqlite3*);
void * (*get_auxdata)(sqlite3_context*,int);
int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**);
int (*global_recover)(void);
void (*interruptx)(sqlite3*);
sqlite_int64 (*last_insert_rowid)(sqlite3*);
const char * (*libversion)(void);
int (*libversion_number)(void);
void *(*malloc)(int);
char * (*mprintf)(const char*,...);
int (*open)(const char*,sqlite3**);
int (*open16)(const void*,sqlite3**);
int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*);
void (*progress_handler)(sqlite3*,int,int(*)(void*),void*);
void *(*realloc)(void*,int);
int (*reset)(sqlite3_stmt*pStmt);
void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_double)(sqlite3_context*,double);
void (*result_error)(sqlite3_context*,const char*,int);
void (*result_error16)(sqlite3_context*,const void*,int);
void (*result_int)(sqlite3_context*,int);
void (*result_int64)(sqlite3_context*,sqlite_int64);
void (*result_null)(sqlite3_context*);
void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*));
void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*));
void (*result_value)(sqlite3_context*,sqlite3_value*);
void * (*rollback_hook)(sqlite3*,void(*)(void*),void*);
int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,
const char*,const char*),void*);
void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*));
char * (*snprintf)(int,char*,const char*,...);
int (*step)(sqlite3_stmt*);
int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,
char const**,char const**,int*,int*,int*);
void (*thread_cleanup)(void);
int (*total_changes)(sqlite3*);
void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*);
int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*);
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,
sqlite_int64),void*);
void * (*user_data)(sqlite3_context*);
const void * (*value_blob)(sqlite3_value*);
int (*value_bytes)(sqlite3_value*);
int (*value_bytes16)(sqlite3_value*);
double (*value_double)(sqlite3_value*);
int (*value_int)(sqlite3_value*);
sqlite_int64 (*value_int64)(sqlite3_value*);
int (*value_numeric_type)(sqlite3_value*);
const unsigned char * (*value_text)(sqlite3_value*);
const void * (*value_text16)(sqlite3_value*);
const void * (*value_text16be)(sqlite3_value*);
const void * (*value_text16le)(sqlite3_value*);
int (*value_type)(sqlite3_value*);
char *(*vmprintf)(const char*,va_list);
/* Added ??? */
int (*overload_function)(sqlite3*, const char *zFuncName, int nArg);
/* Added by 3.3.13 */
int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
int (*clear_bindings)(sqlite3_stmt*);
/* Added by 3.4.1 */
int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,
void (*xDestroy)(void *));
/* Added by 3.5.0 */
int (*bind_zeroblob)(sqlite3_stmt*,int,int);
int (*blob_bytes)(sqlite3_blob*);
int (*blob_close)(sqlite3_blob*);
int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,
int,sqlite3_blob**);
int (*blob_read)(sqlite3_blob*,void*,int,int);
int (*blob_write)(sqlite3_blob*,const void*,int,int);
int (*create_collation_v2)(sqlite3*,const char*,int,void*,
int(*)(void*,int,const void*,int,const void*),
void(*)(void*));
int (*file_control)(sqlite3*,const char*,int,void*);
sqlite3_int64 (*memory_highwater)(int);
sqlite3_int64 (*memory_used)(void);
sqlite3_mutex *(*mutex_alloc)(int);
void (*mutex_enter)(sqlite3_mutex*);
void (*mutex_free)(sqlite3_mutex*);
void (*mutex_leave)(sqlite3_mutex*);
int (*mutex_try)(sqlite3_mutex*);
int (*open_v2)(const char*,sqlite3**,int,const char*);
int (*release_memory)(int);
void (*result_error_nomem)(sqlite3_context*);
void (*result_error_toobig)(sqlite3_context*);
int (*sleep)(int);
void (*soft_heap_limit)(int);
sqlite3_vfs *(*vfs_find)(const char*);
int (*vfs_register)(sqlite3_vfs*,int);
int (*vfs_unregister)(sqlite3_vfs*);
int (*xthreadsafe)(void);
void (*result_zeroblob)(sqlite3_context*,int);
void (*result_error_code)(sqlite3_context*,int);
int (*test_control)(int, ...);
void (*randomness)(int,void*);
sqlite3 *(*context_db_handle)(sqlite3_context*);
int (*extended_result_codes)(sqlite3*,int);
int (*limit)(sqlite3*,int,int);
sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*);
const char *(*sql)(sqlite3_stmt*);
int (*status)(int,int*,int*,int);
int (*backup_finish)(sqlite3_backup*);
sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*);
int (*backup_pagecount)(sqlite3_backup*);
int (*backup_remaining)(sqlite3_backup*);
int (*backup_step)(sqlite3_backup*,int);
const char *(*compileoption_get)(int);
int (*compileoption_used)(const char*);
int (*create_function_v2)(sqlite3*,const char*,int,int,void*,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*),
void(*xDestroy)(void*));
int (*db_config)(sqlite3*,int,...);
sqlite3_mutex *(*db_mutex)(sqlite3*);
int (*db_status)(sqlite3*,int,int*,int*,int);
int (*extended_errcode)(sqlite3*);
void (*log)(int,const char*,...);
sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64);
const char *(*sourceid)(void);
int (*stmt_status)(sqlite3_stmt*,int,int);
int (*strnicmp)(const char*,const char*,int);
int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*);
int (*wal_autocheckpoint)(sqlite3*,int);
int (*wal_checkpoint)(sqlite3*,const char*);
void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*);
int (*blob_reopen)(sqlite3_blob*,sqlite3_int64);
int (*vtab_config)(sqlite3*,int op,...);
int (*vtab_on_conflict)(sqlite3*);
/* Version 3.7.16 and later */
int (*close_v2)(sqlite3*);
const char *(*db_filename)(sqlite3*,const char*);
int (*db_readonly)(sqlite3*,const char*);
int (*db_release_memory)(sqlite3*);
const char *(*errstr)(int);
int (*stmt_busy)(sqlite3_stmt*);
int (*stmt_readonly)(sqlite3_stmt*);
int (*stricmp)(const char*,const char*);
int (*uri_boolean)(const char*,const char*,int);
sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64);
const char *(*uri_parameter)(const char*,const char*);
char *(*vsnprintf)(int,char*,const char*,va_list);
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
/* Version 3.8.7 and later */
int (*auto_extension)(void(*)(void));
int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
void(*)(void*));
int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
void(*)(void*),unsigned char);
int (*cancel_auto_extension)(void(*)(void));
int (*load_extension)(sqlite3*,const char*,const char*,char**);
void *(*malloc64)(sqlite3_uint64);
sqlite3_uint64 (*msize)(void*);
void *(*realloc64)(void*,sqlite3_uint64);
void (*reset_auto_extension)(void);
void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
void(*)(void*));
void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
void(*)(void*), unsigned char);
int (*strglob)(const char*,const char*);
/* Version 3.8.11 and later */
sqlite3_value *(*value_dup)(const sqlite3_value*);
void (*value_free)(sqlite3_value*);
int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
/* Version 3.9.0 and later */
unsigned int (*value_subtype)(sqlite3_value*);
void (*result_subtype)(sqlite3_context*,unsigned int);
/* Version 3.10.0 and later */
int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
int (*strlike)(const char*,const char*,unsigned int);
int (*db_cacheflush)(sqlite3*);
/* Version 3.12.0 and later */
int (*system_errno)(sqlite3*);
};
/*
** The following macros redefine the API routines so that they are
** redirected through the global sqlite3_api structure.
**
** This header file is also used by the loadext.c source file
** (part of the main SQLite library - not an extension) so that
** it can get access to the sqlite3_api_routines structure
** definition. But the main library does not want to redefine
** the API. So the redefinition macros are only valid if the
** SQLITE_CORE macros is undefined.
*/
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
#define sqlite3_aggregate_context sqlite3_api->aggregate_context
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_aggregate_count sqlite3_api->aggregate_count
#endif
#define sqlite3_bind_blob sqlite3_api->bind_blob
#define sqlite3_bind_double sqlite3_api->bind_double
#define sqlite3_bind_int sqlite3_api->bind_int
#define sqlite3_bind_int64 sqlite3_api->bind_int64
#define sqlite3_bind_null sqlite3_api->bind_null
#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count
#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index
#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name
#define sqlite3_bind_text sqlite3_api->bind_text
#define sqlite3_bind_text16 sqlite3_api->bind_text16
#define sqlite3_bind_value sqlite3_api->bind_value
#define sqlite3_busy_handler sqlite3_api->busy_handler
#define sqlite3_busy_timeout sqlite3_api->busy_timeout
#define sqlite3_changes sqlite3_api->changes
#define sqlite3_close sqlite3_api->close
#define sqlite3_collation_needed sqlite3_api->collation_needed
#define sqlite3_collation_needed16 sqlite3_api->collation_needed16
#define sqlite3_column_blob sqlite3_api->column_blob
#define sqlite3_column_bytes sqlite3_api->column_bytes
#define sqlite3_column_bytes16 sqlite3_api->column_bytes16
#define sqlite3_column_count sqlite3_api->column_count
#define sqlite3_column_database_name sqlite3_api->column_database_name
#define sqlite3_column_database_name16 sqlite3_api->column_database_name16
#define sqlite3_column_decltype sqlite3_api->column_decltype
#define sqlite3_column_decltype16 sqlite3_api->column_decltype16
#define sqlite3_column_double sqlite3_api->column_double
#define sqlite3_column_int sqlite3_api->column_int
#define sqlite3_column_int64 sqlite3_api->column_int64
#define sqlite3_column_name sqlite3_api->column_name
#define sqlite3_column_name16 sqlite3_api->column_name16
#define sqlite3_column_origin_name sqlite3_api->column_origin_name
#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16
#define sqlite3_column_table_name sqlite3_api->column_table_name
#define sqlite3_column_table_name16 sqlite3_api->column_table_name16
#define sqlite3_column_text sqlite3_api->column_text
#define sqlite3_column_text16 sqlite3_api->column_text16
#define sqlite3_column_type sqlite3_api->column_type
#define sqlite3_column_value sqlite3_api->column_value
#define sqlite3_commit_hook sqlite3_api->commit_hook
#define sqlite3_complete sqlite3_api->complete
#define sqlite3_complete16 sqlite3_api->complete16
#define sqlite3_create_collation sqlite3_api->create_collation
#define sqlite3_create_collation16 sqlite3_api->create_collation16
#define sqlite3_create_function sqlite3_api->create_function
#define sqlite3_create_function16 sqlite3_api->create_function16
#define sqlite3_create_module sqlite3_api->create_module
#define sqlite3_create_module_v2 sqlite3_api->create_module_v2
#define sqlite3_data_count sqlite3_api->data_count
#define sqlite3_db_handle sqlite3_api->db_handle
#define sqlite3_declare_vtab sqlite3_api->declare_vtab
#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache
#define sqlite3_errcode sqlite3_api->errcode
#define sqlite3_errmsg sqlite3_api->errmsg
#define sqlite3_errmsg16 sqlite3_api->errmsg16
#define sqlite3_exec sqlite3_api->exec
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_expired sqlite3_api->expired
#endif
#define sqlite3_finalize sqlite3_api->finalize
#define sqlite3_free sqlite3_api->free
#define sqlite3_free_table sqlite3_api->free_table
#define sqlite3_get_autocommit sqlite3_api->get_autocommit
#define sqlite3_get_auxdata sqlite3_api->get_auxdata
#define sqlite3_get_table sqlite3_api->get_table
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_global_recover sqlite3_api->global_recover
#endif
#define sqlite3_interrupt sqlite3_api->interruptx
#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid
#define sqlite3_libversion sqlite3_api->libversion
#define sqlite3_libversion_number sqlite3_api->libversion_number
#define sqlite3_malloc sqlite3_api->malloc
#define sqlite3_mprintf sqlite3_api->mprintf
#define sqlite3_open sqlite3_api->open
#define sqlite3_open16 sqlite3_api->open16
#define sqlite3_prepare sqlite3_api->prepare
#define sqlite3_prepare16 sqlite3_api->prepare16
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
#define sqlite3_profile sqlite3_api->profile
#define sqlite3_progress_handler sqlite3_api->progress_handler
#define sqlite3_realloc sqlite3_api->realloc
#define sqlite3_reset sqlite3_api->reset
#define sqlite3_result_blob sqlite3_api->result_blob
#define sqlite3_result_double sqlite3_api->result_double
#define sqlite3_result_error sqlite3_api->result_error
#define sqlite3_result_error16 sqlite3_api->result_error16
#define sqlite3_result_int sqlite3_api->result_int
#define sqlite3_result_int64 sqlite3_api->result_int64
#define sqlite3_result_null sqlite3_api->result_null
#define sqlite3_result_text sqlite3_api->result_text
#define sqlite3_result_text16 sqlite3_api->result_text16
#define sqlite3_result_text16be sqlite3_api->result_text16be
#define sqlite3_result_text16le sqlite3_api->result_text16le
#define sqlite3_result_value sqlite3_api->result_value
#define sqlite3_rollback_hook sqlite3_api->rollback_hook
#define sqlite3_set_authorizer sqlite3_api->set_authorizer
#define sqlite3_set_auxdata sqlite3_api->set_auxdata
#define sqlite3_snprintf sqlite3_api->snprintf
#define sqlite3_step sqlite3_api->step
#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata
#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup
#define sqlite3_total_changes sqlite3_api->total_changes
#define sqlite3_trace sqlite3_api->trace
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings
#endif
#define sqlite3_update_hook sqlite3_api->update_hook
#define sqlite3_user_data sqlite3_api->user_data
#define sqlite3_value_blob sqlite3_api->value_blob
#define sqlite3_value_bytes sqlite3_api->value_bytes
#define sqlite3_value_bytes16 sqlite3_api->value_bytes16
#define sqlite3_value_double sqlite3_api->value_double
#define sqlite3_value_int sqlite3_api->value_int
#define sqlite3_value_int64 sqlite3_api->value_int64
#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type
#define sqlite3_value_text sqlite3_api->value_text
#define sqlite3_value_text16 sqlite3_api->value_text16
#define sqlite3_value_text16be sqlite3_api->value_text16be
#define sqlite3_value_text16le sqlite3_api->value_text16le
#define sqlite3_value_type sqlite3_api->value_type
#define sqlite3_vmprintf sqlite3_api->vmprintf
#define sqlite3_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_overload_function sqlite3_api->overload_function
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
#define sqlite3_clear_bindings sqlite3_api->clear_bindings
#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob
#define sqlite3_blob_bytes sqlite3_api->blob_bytes
#define sqlite3_blob_close sqlite3_api->blob_close
#define sqlite3_blob_open sqlite3_api->blob_open
#define sqlite3_blob_read sqlite3_api->blob_read
#define sqlite3_blob_write sqlite3_api->blob_write
#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2
#define sqlite3_file_control sqlite3_api->file_control
#define sqlite3_memory_highwater sqlite3_api->memory_highwater
#define sqlite3_memory_used sqlite3_api->memory_used
#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc
#define sqlite3_mutex_enter sqlite3_api->mutex_enter
#define sqlite3_mutex_free sqlite3_api->mutex_free
#define sqlite3_mutex_leave sqlite3_api->mutex_leave
#define sqlite3_mutex_try sqlite3_api->mutex_try
#define sqlite3_open_v2 sqlite3_api->open_v2
#define sqlite3_release_memory sqlite3_api->release_memory
#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem
#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig
#define sqlite3_sleep sqlite3_api->sleep
#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit
#define sqlite3_vfs_find sqlite3_api->vfs_find
#define sqlite3_vfs_register sqlite3_api->vfs_register
#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister
#define sqlite3_threadsafe sqlite3_api->xthreadsafe
#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob
#define sqlite3_result_error_code sqlite3_api->result_error_code
#define sqlite3_test_control sqlite3_api->test_control
#define sqlite3_randomness sqlite3_api->randomness
#define sqlite3_context_db_handle sqlite3_api->context_db_handle
#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes
#define sqlite3_limit sqlite3_api->limit
#define sqlite3_next_stmt sqlite3_api->next_stmt
#define sqlite3_sql sqlite3_api->sql
#define sqlite3_status sqlite3_api->status
#define sqlite3_backup_finish sqlite3_api->backup_finish
#define sqlite3_backup_init sqlite3_api->backup_init
#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount
#define sqlite3_backup_remaining sqlite3_api->backup_remaining
#define sqlite3_backup_step sqlite3_api->backup_step
#define sqlite3_compileoption_get sqlite3_api->compileoption_get
#define sqlite3_compileoption_used sqlite3_api->compileoption_used
#define sqlite3_create_function_v2 sqlite3_api->create_function_v2
#define sqlite3_db_config sqlite3_api->db_config
#define sqlite3_db_mutex sqlite3_api->db_mutex
#define sqlite3_db_status sqlite3_api->db_status
#define sqlite3_extended_errcode sqlite3_api->extended_errcode
#define sqlite3_log sqlite3_api->log
#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64
#define sqlite3_sourceid sqlite3_api->sourceid
#define sqlite3_stmt_status sqlite3_api->stmt_status
#define sqlite3_strnicmp sqlite3_api->strnicmp
#define sqlite3_unlock_notify sqlite3_api->unlock_notify
#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint
#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint
#define sqlite3_wal_hook sqlite3_api->wal_hook
#define sqlite3_blob_reopen sqlite3_api->blob_reopen
#define sqlite3_vtab_config sqlite3_api->vtab_config
#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict
/* Version 3.7.16 and later */
#define sqlite3_close_v2 sqlite3_api->close_v2
#define sqlite3_db_filename sqlite3_api->db_filename
#define sqlite3_db_readonly sqlite3_api->db_readonly
#define sqlite3_db_release_memory sqlite3_api->db_release_memory
#define sqlite3_errstr sqlite3_api->errstr
#define sqlite3_stmt_busy sqlite3_api->stmt_busy
#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly
#define sqlite3_stricmp sqlite3_api->stricmp
#define sqlite3_uri_boolean sqlite3_api->uri_boolean
#define sqlite3_uri_int64 sqlite3_api->uri_int64
#define sqlite3_uri_parameter sqlite3_api->uri_parameter
#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
/* Version 3.8.7 and later */
#define sqlite3_auto_extension sqlite3_api->auto_extension
#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
#define sqlite3_bind_text64 sqlite3_api->bind_text64
#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
#define sqlite3_load_extension sqlite3_api->load_extension
#define sqlite3_malloc64 sqlite3_api->malloc64
#define sqlite3_msize sqlite3_api->msize
#define sqlite3_realloc64 sqlite3_api->realloc64
#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
#define sqlite3_result_blob64 sqlite3_api->result_blob64
#define sqlite3_result_text64 sqlite3_api->result_text64
#define sqlite3_strglob sqlite3_api->strglob
/* Version 3.8.11 and later */
#define sqlite3_value_dup sqlite3_api->value_dup
#define sqlite3_value_free sqlite3_api->value_free
#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
/* Version 3.9.0 and later */
#define sqlite3_value_subtype sqlite3_api->value_subtype
#define sqlite3_result_subtype sqlite3_api->result_subtype
/* Version 3.10.0 and later */
#define sqlite3_status64 sqlite3_api->status64
#define sqlite3_strlike sqlite3_api->strlike
#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush
/* Version 3.12.0 and later */
#define sqlite3_system_errno sqlite3_api->system_errno
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
/* This case when the file really is being compiled as a loadable
** extension */
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
# define SQLITE_EXTENSION_INIT3 \
extern const sqlite3_api_routines *sqlite3_api;
#else
/* This case when the file is being statically linked into the
** application */
# define SQLITE_EXTENSION_INIT1 /*no-op*/
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
# define SQLITE_EXTENSION_INIT3 /*no-op*/
#endif
#endif /* _SQLITE3EXT_H_ */