diff --git a/.gitignore b/.gitignore index e307413..ebb6854 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ anonircd anonircd.conf bin data +dist diff --git a/.travis.yml b/.travis.yml index a7f6394..d466faa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,34 +2,14 @@ language: go go: - 1.x - - tip -install: true +env: + global: + - CGO_ENABLED=0 + - GO111MODULE=on -matrix: - allow_failures: - - go: tip - fast_finish: true - -notifications: - email: false - -before_script: - - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) - - PKGS=$(go list ./... | grep -v /vendor/) -# - go get github.com/golang/lint/golint -# - go get honnef.co/go/tools/cmd/megacheck +install: + - go mod download script: - - test -z $(gofmt -s -l $GO_FILES) - - go test -v -race $PKGS - - go vet $PKGS -# - megacheck $PKGS -# - golint -set_exit_status $PKGS - -notifications: - irc: - channels: - - "z.1chan.us#anonircd" - on_success: change - on_failure: always + - go test -v ./... diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 8ad23eb..0000000 --- a/Gopkg.lock +++ /dev/null @@ -1,63 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/BurntSushi/toml" - packages = ["."] - revision = "b26d9c308763d68093482582cea63d69be07a0f0" - version = "v0.3.0" - -[[projects]] - name = "github.com/gorilla/securecookie" - packages = ["."] - revision = "667fe4e3466a040b780561fe9b51a83a3753eefc" - version = "v1.1" - -[[projects]] - name = "github.com/jessevdk/go-flags" - packages = ["."] - revision = "96dc06278ce32a0e9d957d590bb987c81ee66407" - version = "v1.3.0" - -[[projects]] - branch = "master" - name = "github.com/jmoiron/sqlx" - packages = [".","reflectx"] - revision = "de8647470aafe4854c976707c431dbe1eb2822c6" - -[[projects]] - name = "github.com/mattn/go-sqlite3" - packages = ["."] - revision = "ed69081a91fd053f17672236b0dd52ba7485e1a3" - version = "v1.4.0" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["sha3"] - revision = "d585fd2cc9195196078f516b69daff6744ef5e84" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["context"] - revision = "d866cfc389cec985d6fda2859936a575a55a3ab6" - -[[projects]] - branch = "v2" - name = "gopkg.in/sorcix/irc.v2" - packages = [".","internal"] - revision = "1b25be7f891d1bd0190ac0ef159da153c9ffa22a" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "2597d02f0d1ff0af313642458ae19f0dabc6e5464adc94013e82fa3285a75c4e" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index c7da9ba..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,34 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/BurntSushi/toml" - version = "0.3.0" - -[[constraint]] - branch = "master" - name = "github.com/orcaman/concurrent-map" - -[[constraint]] - branch = "v2" - name = "gopkg.in/sorcix/irc.v2" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..845ce88 --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module github.com/sageru-6ch/anonircd + +go 1.12 + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/go-sql-driver/mysql v1.4.1 // indirect + github.com/gorilla/securecookie v1.1.1 + github.com/jessevdk/go-flags v1.4.0 + github.com/jmoiron/sqlx v1.2.0 + github.com/mattn/go-sqlite3 v1.10.0 + github.com/pkg/errors v0.8.1 + golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c + golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect + golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc // indirect + gopkg.in/sorcix/irc.v2 v2.0.0-20190306112350-8d7a73540b90 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..d9550e8 --- /dev/null +++ b/go.sum @@ -0,0 +1,35 @@ +github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/gorilla/securecookie v0.0.0-20160422134519-667fe4e3466a/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/jessevdk/go-flags v1.3.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmoiron/sqlx v0.0.0-20171211234905-de8647470aaf/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mattn/go-sqlite3 v1.4.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +golang.org/x/crypto v0.0.0-20171219041129-d585fd2cc919/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20171212005608-d866cfc389ce/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190327214358-63eda1eb0650/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc h1:4gbWbmmPFp4ySWICouJl6emP0MyS31yy9SrTlAGFT+g= +golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/sorcix/irc.v2 v2.0.0-20170726154628-1b25be7f891d/go.mod h1:9LLe1SvUK2YoWyIuJ+AParKHhu749G8oM+HTQQMZz9E= +gopkg.in/sorcix/irc.v2 v2.0.0-20190306112350-8d7a73540b90 h1:ItuFAq9SlPhZvdIvsdgoE38i9aLLdDpBbFV9vTJhlp8= +gopkg.in/sorcix/irc.v2 v2.0.0-20190306112350-8d7a73540b90/go.mod h1:PmJkUcwbuPi1FiZ9Rarr6wzVMvzkO7uWqH1jwrMkgW0= diff --git a/goreleaser.yml b/goreleaser.yml new file mode 100644 index 0000000..094848b --- /dev/null +++ b/goreleaser.yml @@ -0,0 +1,33 @@ +project_name: stick +builds: + - + env: + - CGO_ENABLED=0 + ldflags: + - -s -w -X main.version={{.Version}} + goos: + - darwin + - freebsd + - linux + - windows + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + goarm: + - 6 + - 7 +archive: + replacements: + 386: i386 + format_overrides: + - goos: windows + format: zip + files: + - LICENSE + - README.md +checksum: + name_template: 'checksums.txt' diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index 0cd3800..0000000 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 8b8afc4..0000000 --- a/vendor/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip -install: - - go install ./... - - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 6efcfd0..0000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. 
You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848..0000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md deleted file mode 100644 index 7c1b37e..0000000 --- a/vendor/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,218 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/toml-lang/toml - -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) - -Documentation: https://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. 
- -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. 
diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.go b/vendor/github.com/BurntSushi/toml/_examples/example.go deleted file mode 100644 index 79f31f2..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/example.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/BurntSushi/toml" -) - -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} - -func main() { - var config tomlConfig - if _, err := toml.DecodeFile("example.toml", &config); err != nil { - fmt.Println(err) - return - } - - fmt.Printf("Title: %s\n", config.Title) - fmt.Printf("Owner: %s (%s, %s), Born: %s\n", - config.Owner.Name, config.Owner.Org, config.Owner.Bio, - config.Owner.DOB) - fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n", - config.DB.Server, config.DB.Ports, config.DB.ConnMax, - config.DB.Enabled) - for serverName, server := range config.Servers { - fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC) - } - fmt.Printf("Client data: %v\n", config.Clients.Data) - fmt.Printf("Client hosts: %v\n", config.Clients.Hosts) -} diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.toml b/vendor/github.com/BurntSushi/toml/_examples/example.toml deleted file mode 100644 index 32c7a4f..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/example.toml +++ /dev/null @@ -1,35 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] diff --git a/vendor/github.com/BurntSushi/toml/_examples/hard.toml b/vendor/github.com/BurntSushi/toml/_examples/hard.toml deleted file mode 100644 index 26145d2..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/hard.toml +++ /dev/null @@ -1,22 +0,0 @@ -# Test file for TOML -# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate -# This part you'll really hate - -[the] -test_string = "You'll hate me after this - #" # " Annoying, isn't it? - - [the.hard] - test_array = [ "] ", " # "] # ] There you go, parse this! - test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ] - # You didn't think it'd as easy as chucking out the last #, did you? - another_test_string = " Same thing, but with a string #" - harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too" - # Things will get harder - - [the.hard.bit#] - what? = "You don't think some user won't do that?" 
- multi_line_array = [ - "]", - # ] Oh yes I did - ] - diff --git a/vendor/github.com/BurntSushi/toml/_examples/implicit.toml b/vendor/github.com/BurntSushi/toml/_examples/implicit.toml deleted file mode 100644 index 1dea5ce..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/implicit.toml +++ /dev/null @@ -1,4 +0,0 @@ -# [x] you -# [x.y] don't -# [x.y.z] need these -[x.y.z.w] # for this to work diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml deleted file mode 100644 index 74e9e33..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml +++ /dev/null @@ -1,6 +0,0 @@ -# DO NOT WANT -[fruit] -type = "apple" - -[fruit.type] -apple = "yes" diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid.toml deleted file mode 100644 index beb1dba..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/invalid.toml +++ /dev/null @@ -1,35 +0,0 @@ -# This is an INVALID TOML document. Boom. -# Can you spot the error without help? - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T7:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme1.toml b/vendor/github.com/BurntSushi/toml/_examples/readme1.toml deleted file mode 100644 index 3e1261d..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/readme1.toml +++ /dev/null @@ -1,5 +0,0 @@ -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme2.toml b/vendor/github.com/BurntSushi/toml/_examples/readme2.toml deleted file mode 100644 index b51cd93..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/readme2.toml +++ /dev/null @@ -1 +0,0 @@ -some_key_NAME = "wat" diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
- diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md deleted file mode 100644 index 93f4e3a..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Implements the TOML test suite interface - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for my -[toml parser written in Go](https://github.com/BurntSushi/toml). -In particular, it maps TOML data on `stdin` to a JSON format on `stdout`. - - -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go deleted file mode 100644 index 14e7557..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go +++ /dev/null @@ -1,90 +0,0 @@ -// Command toml-test-decoder satisfies the toml-test interface for testing -// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout. -package main - -import ( - "encoding/json" - "flag" - "fmt" - "log" - "os" - "path" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil { - log.Fatalf("Error decoding TOML: %s", err) - } - - typedTmp := translate(tmp) - if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil { - log.Fatalf("Error encoding JSON: %s", err) - } -} - -func translate(tomlData interface{}) interface{} { - switch orig := tomlData.(type) { - case map[string]interface{}: - typed := make(map[string]interface{}, len(orig)) - for k, v := range orig { - typed[k] = translate(v) - } - return typed - case []map[string]interface{}: - typed := make([]map[string]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v).(map[string]interface{}) - } - return typed - case []interface{}: - typed := make([]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v) - } - - // We don't really need to tag arrays, but let's be future proof. - // (If TOML ever supports tuples, we'll need this.) 
- return tag("array", typed) - case time.Time: - return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) - case bool: - return tag("bool", fmt.Sprintf("%v", orig)) - case int64: - return tag("integer", fmt.Sprintf("%d", orig)) - case float64: - return tag("float", fmt.Sprintf("%v", orig)) - case string: - return tag("string", orig) - } - - panic(fmt.Sprintf("Unknown type: %T", tomlData)) -} - -func tag(typeName string, data interface{}) map[string]interface{} { - return map[string]interface{}{ - "type": typeName, - "value": data, - } -} diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md deleted file mode 100644 index a45bd4d..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Implements the TOML test suite interface for TOML encoders - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for the -[TOML encoder](https://github.com/BurntSushi/toml). -In particular, it maps JSON data on `stdin` to a TOML format on `stdout`. - - -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go deleted file mode 100644 index 092cc68..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go +++ /dev/null @@ -1,131 +0,0 @@ -// Command toml-test-encoder satisfies the toml-test interface for testing -// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout. 
-package main - -import ( - "encoding/json" - "flag" - "log" - "os" - "path" - "strconv" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil { - log.Fatalf("Error decoding JSON: %s", err) - } - - tomlData := translate(tmp) - if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil { - log.Fatalf("Error encoding TOML: %s", err) - } -} - -func translate(typedJson interface{}) interface{} { - switch v := typedJson.(type) { - case map[string]interface{}: - if len(v) == 2 && in("type", v) && in("value", v) { - return untag(v) - } - m := make(map[string]interface{}, len(v)) - for k, v2 := range v { - m[k] = translate(v2) - } - return m - case []interface{}: - tabArray := make([]map[string]interface{}, len(v)) - for i := range v { - if m, ok := translate(v[i]).(map[string]interface{}); ok { - tabArray[i] = m - } else { - log.Fatalf("JSON arrays may only contain objects. This " + - "corresponds to only tables being allowed in " + - "TOML table arrays.") - } - } - return tabArray - } - log.Fatalf("Unrecognized JSON format '%T'.", typedJson) - panic("unreachable") -} - -func untag(typed map[string]interface{}) interface{} { - t := typed["type"].(string) - v := typed["value"] - switch t { - case "string": - return v.(string) - case "integer": - v := v.(string) - n, err := strconv.Atoi(v) - if err != nil { - log.Fatalf("Could not parse '%s' as integer: %s", v, err) - } - return n - case "float": - v := v.(string) - f, err := strconv.ParseFloat(v, 64) - if err != nil { - log.Fatalf("Could not parse '%s' as float64: %s", v, err) - } - return f - case "datetime": - v := v.(string) - t, err := time.Parse("2006-01-02T15:04:05Z", v) - if err != nil { - log.Fatalf("Could not parse '%s' as a datetime: %s", v, err) - } - return t - case "bool": - v := v.(string) - switch v { - case "true": - return true - case "false": - return false - } - log.Fatalf("Could not parse '%s' as a boolean.", v) - case "array": - v := v.([]interface{}) - array := make([]interface{}, len(v)) - for i := range v { - if m, ok := v[i].(map[string]interface{}); ok { - array[i] = untag(m) - } else { - log.Fatalf("Arrays may only contain other arrays or "+ - "primitive values, but found a '%T'.", m) - } - } - return array - } - log.Fatalf("Unrecognized tag type '%s'.", t) - panic("unreachable") -} - -func in(key string, m map[string]interface{}) bool { - _, ok := m[key] - return ok -} diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
- diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md deleted file mode 100644 index 51231e2..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# TOML Validator - -If Go is installed, it's simple to try it out: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -You can see the types of every key in a TOML file with: - -```bash -tomlv -types some-toml-file.toml -``` - -At the moment, only one error message is reported at a time. Error messages -include line numbers. No output means that the files given are valid TOML, or -there is a bug in `tomlv`. - -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go deleted file mode 100644 index c7d689a..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go +++ /dev/null @@ -1,61 +0,0 @@ -// Command tomlv validates TOML documents and prints each key's type. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/BurntSushi/toml" -) - -var ( - flagTypes = false -) - -func init() { - log.SetFlags(0) - - flag.BoolVar(&flagTypes, "types", flagTypes, - "When set, the types of every defined key will be shown.") - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s toml-file [ toml-file ... ]\n", - path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() < 1 { - flag.Usage() - } - for _, f := range flag.Args() { - var tmp interface{} - md, err := toml.DecodeFile(f, &tmp) - if err != nil { - log.Fatalf("Error in '%s': %s", f, err) - } - if flagTypes { - printTypes(md) - } - } -} - -func printTypes(md toml.MetaData) { - tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - for _, key := range md.Keys() { - fmt.Fprintf(tabw, "%s%s\t%s\n", - strings.Repeat(" ", len(key)-1), key, md.Type(key...)) - } - tabw.Flush() -} diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index b0fd51d..0000000 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,509 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -func e(format string, args ...interface{}) error { - return fmt.Errorf("toml: "+format, args...) -} - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) - return err -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. 
-type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. -// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) - } - if rv.IsNil() { - return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) - } - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, indirect(rv)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. 
-func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. 
- if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("unsupported type %s", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if mapping == nil { - return nil - } - return e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! - return e("cannot write unexported field %s.%s", - rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if tmap == nil { - return nil - } - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - n := datav.Len() - if rv.IsNil() || rv.Cap() < n { - rv.Set(reflect.MakeSlice(rv.Type(), n, n)) - } - rv.SetLen(n) - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch 
rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. - case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). 
-func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanSet() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("cannot load TOML value of type %T into a Go %s", data, expected) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index b9914a6..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,121 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. 
-// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index 0c36b33..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,1447 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "math" - "reflect" - "strings" - "testing" - "time" -) - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", "yellow", "black"}, - }, - My: map[string]cats{ - "Cats": {Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - type cat struct{ Name string } - - for _, test := range []struct { - label string - input string - decodeInto interface{} - wantDecoded interface{} - }{ - { - label: "embedded struct", - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - { - label: "embedded non-nil pointer to struct", - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - { - label: "embedded nil pointer to struct", - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - { - label: "unexported embedded struct", - input: `Name = "socks"`, - decodeInto: &struct{ cat }{}, - wantDecoded: &struct{ cat }{cat{"socks"}}, - }, - { - label: "embedded int", - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - test.label, test.wantDecoded, test.decodeInto) - } - } -} - -func TestDecodeIgnoredFields(t *testing.T) { - type simple struct { - Number int `toml:"-"` - } - const input = ` -Number = 123 -- = 234 -` - var s simple - if _, err := Decode(input, &s); err != nil { - 
t.Fatal(err) - } - if s.Number != 0 { - t.Errorf("got: %d; want 0", s.Number) - } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestTableNesting(t *testing.T) { - for _, tt := range []struct { - t string - want []string - }{ - {"[a.b.c]", []string{"a", "b", "c"}}, - {`[a."b.c"]`, []string{"a", "b.c"}}, - {`[a.'b.c']`, []string{"a", "b.c"}}, - {`[a.' b ']`, []string{"a", " b "}}, - {"[ d.e.f ]", []string{"d", "e", "f"}}, - {"[ g . h . i ]", []string{"g", "h", "i"}}, - {`[ j . "ʞ" . 'l' ]`, []string{"j", "ʞ", "l"}}, - } { - var m map[string]interface{} - if _, err := Decode(tt.t, &m); err != nil { - t.Errorf("Decode(%q): got error: %s", tt.t, err) - continue - } - if keys := extractNestedKeys(m); !reflect.DeepEqual(keys, tt.want) { - t.Errorf("Decode(%q): got nested keys %#v; want %#v", - tt.t, keys, tt.want) - } - } -} - -func extractNestedKeys(v map[string]interface{}) []string { - var result []string - for { - if len(v) != 1 { - return result - } - for k, m := range v { - result = append(result, k) - var ok bool - v, ok = m.(map[string]interface{}) - if !ok { - return result - } - } - - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. 
-func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -func TestDecodeDatetime(t *testing.T) { - const noTimestamp = "2006-01-02T15:04:05" - for _, tt := range []struct { - s string - t string - format string - }{ - {"1979-05-27T07:32:00Z", "1979-05-27T07:32:00Z", time.RFC3339}, - {"1979-05-27T00:32:00-07:00", "1979-05-27T00:32:00-07:00", time.RFC3339}, - { - "1979-05-27T00:32:00.999999-07:00", - "1979-05-27T00:32:00.999999-07:00", - time.RFC3339, - }, - {"1979-05-27T07:32:00", "1979-05-27T07:32:00", noTimestamp}, - { - "1979-05-27T00:32:00.999999", - "1979-05-27T00:32:00.999999", - noTimestamp, - }, - {"1979-05-27", "1979-05-27T00:00:00", noTimestamp}, - } { - var x struct{ D time.Time } - input := "d = " + tt.s - if _, err := Decode(input, &x); err != nil { - t.Errorf("Decode(%q): got error: %s", input, err) - continue - } - want, err := time.ParseInLocation(tt.format, tt.t, time.Local) - if err != nil { - panic(err) - } - if !x.D.Equal(want) { - t.Errorf("Decode(%q): got %s; want %s", input, x.D, want) - } - } -} - -func TestDecodeBadDatetime(t *testing.T) { - var x struct{ T time.Time } - for _, s := range []string{ - "123", - "2006-01-50T00:00:00Z", - "2006-01-30T00:00", - "2006-01-30T", - } { - input := "T = " + s - if _, err := Decode(input, &x); err == nil { - t.Errorf("Expected invalid DateTime error for %q", s) - } - } -} - -func TestDecodeMultilineStrings(t *testing.T) { - var x struct { - S string - } - const s0 = `s = """ -a b \n c -d e f -"""` - if _, err := Decode(s0, &x); err != nil { - t.Fatal(err) - } - if want := "a b \n c\nd e f\n"; x.S != want { - t.Errorf("got: %q; want: %q", x.S, want) - } - const s1 = `s = """a b c\ -"""` - if _, err := Decode(s1, &x); err != nil { - t.Fatal(err) - } - if want := "a b c"; x.S != want { - t.Errorf("got: %q; want: %q", x.S, want) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func TestDecodeInts(t *testing.T) { - for _, tt := range []struct { - s string - want int64 - }{ - {"0", 0}, - {"+99", 99}, - {"-10", -10}, - {"1_234_567", 1234567}, - {"1_2_3_4", 1234}, - {"-9_223_372_036_854_775_808", math.MinInt64}, - {"9_223_372_036_854_775_807", math.MaxInt64}, - } { - var x struct{ N int64 } - input := "n = " + tt.s - if _, err := Decode(input, 
&x); err != nil { - t.Errorf("Decode(%q): got error: %s", input, err) - continue - } - if x.N != tt.want { - t.Errorf("Decode(%q): got %d; want %d", input, x.N, tt.want) - } - } -} - -func TestDecodeFloats(t *testing.T) { - for _, tt := range []struct { - s string - want float64 - }{ - {"+1.0", 1}, - {"3.1415", 3.1415}, - {"-0.01", -0.01}, - {"5e+22", 5e22}, - {"1e6", 1e6}, - {"-2E-2", -2e-2}, - {"6.626e-34", 6.626e-34}, - {"9_224_617.445_991_228_313", 9224617.445991228313}, - {"9_876.54_32e1_0", 9876.5432e10}, - } { - var x struct{ N float64 } - input := "n = " + tt.s - if _, err := Decode(input, &x); err != nil { - t.Errorf("Decode(%q): got error: %s", input, err) - continue - } - if x.N != tt.want { - t.Errorf("Decode(%q): got %f; want %f", input, x.N, tt.want) - } - } -} - -func TestDecodeMalformedNumbers(t *testing.T) { - for _, tt := range []struct { - s string - want string - }{ - {"++99", "expected a digit"}, - {"0..1", "must be followed by one or more digits"}, - {"0.1.2", "Invalid float value"}, - {"1e2.3", "Invalid float value"}, - {"1e2e3", "Invalid float value"}, - {"_123", "expected value"}, - {"123_", "surrounded by digits"}, - {"1._23", "surrounded by digits"}, - {"1e__23", "surrounded by digits"}, - {"123.", "must be followed by one or more digits"}, - {"1.e2", "must be followed by one or more digits"}, - } { - var x struct{ N interface{} } - input := "n = " + tt.s - _, err := Decode(input, &x) - if err == nil { - t.Errorf("Decode(%q): got nil, want error containing %q", - input, tt.want) - continue - } - if !strings.Contains(err.Error(), tt.want) { - t.Errorf("Decode(%q): got %q, want error containing %q", - input, err, tt.want) - } - } -} - -func TestDecodeBadValues(t *testing.T) { - for _, tt := range []struct { - v interface{} - want string - }{ - {3, "non-pointer int"}, - {(*int)(nil), "nil"}, - } { - _, err := Decode(`x = 3`, tt.v) - if err == nil { - t.Errorf("Decode(%v): got nil; want error containing %q", - tt.v, tt.want) - continue - } - if !strings.Contains(err.Error(), tt.want) { - t.Errorf("Decode(%v): got %q; want error containing %q", - tt.v, err, tt.want) - } - } -} - -func TestUnmarshaler(t *testing.T) { - - var tomlBlob = ` -[dishes.hamboogie] -name = "Hamboogie with fries" -price = 10.99 - -[[dishes.hamboogie.ingredients]] -name = "Bread Bun" - -[[dishes.hamboogie.ingredients]] -name = "Lettuce" - -[[dishes.hamboogie.ingredients]] -name = "Real Beef Patty" - -[[dishes.hamboogie.ingredients]] -name = "Tomato" - -[dishes.eggsalad] -name = "Egg Salad with rice" -price = 3.99 - -[[dishes.eggsalad.ingredients]] -name = "Egg" - -[[dishes.eggsalad.ingredients]] -name = "Mayo" - -[[dishes.eggsalad.ingredients]] -name = "Rice" -` - m := &menu{} - if _, err := Decode(tomlBlob, m); err != nil { - t.Fatal(err) - } - - if len(m.Dishes) != 2 { - t.Log("two dishes should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 2, len(m.Dishes)) - } - - eggSalad := m.Dishes["eggsalad"] - if _, ok := interface{}(eggSalad).(dish); !ok { - t.Errorf("expected a dish") - } - - if eggSalad.Name != "Egg Salad with rice" { - t.Errorf("expected the dish to be named 'Egg Salad with rice'") - } - - if len(eggSalad.Ingredients) != 3 { - t.Log("dish should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients)) - } - - found := false - for _, i := range eggSalad.Ingredients { - if i.Name == "Rice" { - found = true - break - } - } - if !found { - t.Error("Rice was not loaded in UnmarshalTOML()") - } - - // test on a value - 
must be passed as * - o := menu{} - if _, err := Decode(tomlBlob, &o); err != nil { - t.Fatal(err) - } - -} - -func TestDecodeInlineTable(t *testing.T) { - input := ` -[CookieJar] -Types = {Chocolate = "yummy", Oatmeal = "best ever"} - -[Seasons] -Locations = {NY = {Temp = "not cold", Rating = 4}, MI = {Temp = "freezing", Rating = 9}} -` - type cookieJar struct { - Types map[string]string - } - type properties struct { - Temp string - Rating int - } - type seasons struct { - Locations map[string]properties - } - type wrapper struct { - CookieJar cookieJar - Seasons seasons - } - var got wrapper - - meta, err := Decode(input, &got) - if err != nil { - t.Fatal(err) - } - want := wrapper{ - CookieJar: cookieJar{ - Types: map[string]string{ - "Chocolate": "yummy", - "Oatmeal": "best ever", - }, - }, - Seasons: seasons{ - Locations: map[string]properties{ - "NY": { - Temp: "not cold", - Rating: 4, - }, - "MI": { - Temp: "freezing", - Rating: 9, - }, - }, - }, - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("after decode, got:\n\n%#v\n\nwant:\n\n%#v", got, want) - } - if len(meta.keys) != 12 { - t.Errorf("after decode, got %d meta keys; want 12", len(meta.keys)) - } - if len(meta.types) != 12 { - t.Errorf("after decode, got %d meta types; want 12", len(meta.types)) - } -} - -func TestDecodeInlineTableArray(t *testing.T) { - type point struct { - X, Y, Z int - } - var got struct { - Points []point - } - // Example inline table array from the spec. - const in = ` -points = [ { x = 1, y = 2, z = 3 }, - { x = 7, y = 8, z = 9 }, - { x = 2, y = 4, z = 8 } ] - -` - if _, err := Decode(in, &got); err != nil { - t.Fatal(err) - } - want := []point{ - {X: 1, Y: 2, Z: 3}, - {X: 7, Y: 8, Z: 9}, - {X: 2, Y: 4, Z: 8}, - } - if !reflect.DeepEqual(got.Points, want) { - t.Errorf("got %#v; want %#v", got.Points, want) - } -} - -func TestDecodeMalformedInlineTable(t *testing.T) { - for _, tt := range []struct { - s string - want string - }{ - {"{,}", "unexpected comma"}, - {"{x = 3 y = 4}", "expected a comma or an inline table terminator"}, - {"{x=3,,y=4}", "unexpected comma"}, - {"{x=3,\ny=4}", "newlines not allowed"}, - {"{x=3\n,y=4}", "newlines not allowed"}, - } { - var x struct{ A map[string]int } - input := "a = " + tt.s - _, err := Decode(input, &x) - if err == nil { - t.Errorf("Decode(%q): got nil, want error containing %q", - input, tt.want) - continue - } - if !strings.Contains(err.Error(), tt.want) { - t.Errorf("Decode(%q): got %q, want error containing %q", - input, err, tt.want) - } - } -} - -type menu struct { - Dishes map[string]dish -} - -func (m *menu) UnmarshalTOML(p interface{}) error { - m.Dishes = make(map[string]dish) - data, _ := p.(map[string]interface{}) - dishes := data["dishes"].(map[string]interface{}) - for n, v := range dishes { - if d, ok := v.(map[string]interface{}); ok { - nd := dish{} - nd.UnmarshalTOML(d) - m.Dishes[n] = nd - } else { - return fmt.Errorf("not a dish") - } - } - return nil -} - -type dish struct { - Name string - Price float32 - Ingredients []ingredient -} - -func (d *dish) UnmarshalTOML(p interface{}) error { - data, _ := p.(map[string]interface{}) - d.Name, _ = data["name"].(string) - d.Price, _ = data["price"].(float32) - ingredients, _ := data["ingredients"].([]map[string]interface{}) - for _, e := range ingredients { - n, _ := interface{}(e).(map[string]interface{}) - name, _ := n["name"].(string) - i := ingredient{name} - d.Ingredients = append(d.Ingredients, i) - } - return nil -} - -type ingredient struct { - Name string -} - -func TestDecodeSlices(t 
*testing.T) { - type T struct { - S []string - } - for i, tt := range []struct { - v T - input string - want T - }{ - {T{}, "", T{}}, - {T{[]string{}}, "", T{[]string{}}}, - {T{[]string{"a", "b"}}, "", T{[]string{"a", "b"}}}, - {T{}, "S = []", T{[]string{}}}, - {T{[]string{}}, "S = []", T{[]string{}}}, - {T{[]string{"a", "b"}}, "S = []", T{[]string{}}}, - {T{}, `S = ["x"]`, T{[]string{"x"}}}, - {T{[]string{}}, `S = ["x"]`, T{[]string{"x"}}}, - {T{[]string{"a", "b"}}, `S = ["x"]`, T{[]string{"x"}}}, - } { - if _, err := Decode(tt.input, &tt.v); err != nil { - t.Errorf("[%d] %s", i, err) - continue - } - if !reflect.DeepEqual(tt.v, tt.want) { - t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want) - } - } -} - -func TestDecodePrimitive(t *testing.T) { - type S struct { - P Primitive - } - type T struct { - S []int - } - slicep := func(s []int) *[]int { return &s } - arrayp := func(a [2]int) *[2]int { return &a } - mapp := func(m map[string]int) *map[string]int { return &m } - for i, tt := range []struct { - v interface{} - input string - want interface{} - }{ - // slices - {slicep(nil), "", slicep(nil)}, - {slicep([]int{}), "", slicep([]int{})}, - {slicep([]int{1, 2, 3}), "", slicep([]int{1, 2, 3})}, - {slicep(nil), "P = [1,2]", slicep([]int{1, 2})}, - {slicep([]int{}), "P = [1,2]", slicep([]int{1, 2})}, - {slicep([]int{1, 2, 3}), "P = [1,2]", slicep([]int{1, 2})}, - - // arrays - {arrayp([2]int{2, 3}), "", arrayp([2]int{2, 3})}, - {arrayp([2]int{2, 3}), "P = [3,4]", arrayp([2]int{3, 4})}, - - // maps - {mapp(nil), "", mapp(nil)}, - {mapp(map[string]int{}), "", mapp(map[string]int{})}, - {mapp(map[string]int{"a": 1}), "", mapp(map[string]int{"a": 1})}, - {mapp(nil), "[P]\na = 2", mapp(map[string]int{"a": 2})}, - {mapp(map[string]int{}), "[P]\na = 2", mapp(map[string]int{"a": 2})}, - {mapp(map[string]int{"a": 1, "b": 3}), "[P]\na = 2", mapp(map[string]int{"a": 2, "b": 3})}, - - // structs - {&T{nil}, "[P]", &T{nil}}, - {&T{[]int{}}, "[P]", &T{[]int{}}}, - {&T{[]int{1, 2, 3}}, "[P]", &T{[]int{1, 2, 3}}}, - {&T{nil}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, - {&T{[]int{}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, - {&T{[]int{1, 2, 3}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, - } { - var s S - md, err := Decode(tt.input, &s) - if err != nil { - t.Errorf("[%d] Decode error: %s", i, err) - continue - } - if err := md.PrimitiveDecode(s.P, tt.v); err != nil { - t.Errorf("[%d] PrimitiveDecode error: %s", i, err) - continue - } - if !reflect.DeepEqual(tt.v, tt.want) { - t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want) - } - } -} - -func TestDecodeErrors(t *testing.T) { - for _, s := range []string{ - `x="`, - `x='`, - `x='''`, - - // Cases found by fuzzing in - // https://github.com/BurntSushi/toml/issues/155. - `""�`, // used to panic with index out of range - `e="""`, // used to hang - } { - var x struct{} - _, err := Decode(s, &x) - if err == nil { - t.Errorf("Decode(%q): got nil error", s) - } - } -} - -// Test for https://github.com/BurntSushi/toml/pull/166. 
-func TestDecodeBoolArray(t *testing.T) { - for _, tt := range []struct { - s string - got interface{} - want interface{} - }{ - { - "a = [true, false]", - &struct{ A []bool }{}, - &struct{ A []bool }{[]bool{true, false}}, - }, - { - "a = {a = true, b = false}", - &struct{ A map[string]bool }{}, - &struct{ A map[string]bool }{map[string]bool{"a": true, "b": false}}, - }, - } { - if _, err := Decode(tt.s, tt.got); err != nil { - t.Errorf("Decode(%q): %s", tt.s, err) - continue - } - if !reflect.DeepEqual(tt.got, tt.want) { - t.Errorf("Decode(%q): got %#v; want %#v", tt.s, tt.got, tt.want) - } - } -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands."J Geils"] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. -[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip,omitempty"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. 
-func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. -func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} - -// Example UnmarshalTOML shows how to implement a struct type that knows how to -// unmarshal itself. The struct must take full responsibility for mapping the -// values passed into the struct. The method may be used with interfaces in a -// struct in cases where the actual type is not known until the data is -// examined. -func Example_unmarshalTOML() { - - var blob = ` -[[parts]] -type = "valve" -id = "valve-1" -size = 1.2 -rating = 4 - -[[parts]] -type = "valve" -id = "valve-2" -size = 2.1 -rating = 5 - -[[parts]] -type = "pipe" -id = "pipe-1" -length = 2.1 -diameter = 12 - -[[parts]] -type = "cable" -id = "cable-1" -length = 12 -rating = 3.1 -` - o := &order{} - err := Unmarshal([]byte(blob), o) - if err != nil { - log.Fatal(err) - } - - fmt.Println(len(o.parts)) - - for _, part := range o.parts { - fmt.Println(part.Name()) - } - - // Code to implement UmarshalJSON. - - // type order struct { - // // NOTE `order.parts` is a private slice of type `part` which is an - // // interface and may only be loaded from toml using the - // // UnmarshalTOML() method of the Umarshaler interface. - // parts parts - // } - - // func (o *order) UnmarshalTOML(data interface{}) error { - - // // NOTE the example below contains detailed type casting to show how - // // the 'data' is retrieved. In operational use, a type cast wrapper - // // may be preferred e.g. 
- // // - // // func AsMap(v interface{}) (map[string]interface{}, error) { - // // return v.(map[string]interface{}) - // // } - // // - // // resulting in: - // // d, _ := AsMap(data) - // // - - // d, _ := data.(map[string]interface{}) - // parts, _ := d["parts"].([]map[string]interface{}) - - // for _, p := range parts { - - // typ, _ := p["type"].(string) - // id, _ := p["id"].(string) - - // // detect the type of part and handle each case - // switch p["type"] { - // case "valve": - - // size := float32(p["size"].(float64)) - // rating := int(p["rating"].(int64)) - - // valve := &valve{ - // Type: typ, - // ID: id, - // Size: size, - // Rating: rating, - // } - - // o.parts = append(o.parts, valve) - - // case "pipe": - - // length := float32(p["length"].(float64)) - // diameter := int(p["diameter"].(int64)) - - // pipe := &pipe{ - // Type: typ, - // ID: id, - // Length: length, - // Diameter: diameter, - // } - - // o.parts = append(o.parts, pipe) - - // case "cable": - - // length := int(p["length"].(int64)) - // rating := float32(p["rating"].(float64)) - - // cable := &cable{ - // Type: typ, - // ID: id, - // Length: length, - // Rating: rating, - // } - - // o.parts = append(o.parts, cable) - - // } - // } - - // return nil - // } - - // type parts []part - - // type part interface { - // Name() string - // } - - // type valve struct { - // Type string - // ID string - // Size float32 - // Rating int - // } - - // func (v *valve) Name() string { - // return fmt.Sprintf("VALVE: %s", v.ID) - // } - - // type pipe struct { - // Type string - // ID string - // Length float32 - // Diameter int - // } - - // func (p *pipe) Name() string { - // return fmt.Sprintf("PIPE: %s", p.ID) - // } - - // type cable struct { - // Type string - // ID string - // Length int - // Rating float32 - // } - - // func (c *cable) Name() string { - // return fmt.Sprintf("CABLE: %s", c.ID) - // } - - // Output: - // 4 - // VALVE: valve-1 - // VALVE: valve-2 - // PIPE: pipe-1 - // CABLE: cable-1 - -} - -type order struct { - // NOTE `order.parts` is a private slice of type `part` which is an - // interface and may only be loaded from toml using the UnmarshalTOML() - // method of the Umarshaler interface. - parts parts -} - -func (o *order) UnmarshalTOML(data interface{}) error { - - // NOTE the example below contains detailed type casting to show how - // the 'data' is retrieved. In operational use, a type cast wrapper - // may be preferred e.g. 
- // - // func AsMap(v interface{}) (map[string]interface{}, error) { - // return v.(map[string]interface{}) - // } - // - // resulting in: - // d, _ := AsMap(data) - // - - d, _ := data.(map[string]interface{}) - parts, _ := d["parts"].([]map[string]interface{}) - - for _, p := range parts { - - typ, _ := p["type"].(string) - id, _ := p["id"].(string) - - // detect the type of part and handle each case - switch p["type"] { - case "valve": - - size := float32(p["size"].(float64)) - rating := int(p["rating"].(int64)) - - valve := &valve{ - Type: typ, - ID: id, - Size: size, - Rating: rating, - } - - o.parts = append(o.parts, valve) - - case "pipe": - - length := float32(p["length"].(float64)) - diameter := int(p["diameter"].(int64)) - - pipe := &pipe{ - Type: typ, - ID: id, - Length: length, - Diameter: diameter, - } - - o.parts = append(o.parts, pipe) - - case "cable": - - length := int(p["length"].(int64)) - rating := float32(p["rating"].(float64)) - - cable := &cable{ - Type: typ, - ID: id, - Length: length, - Rating: rating, - } - - o.parts = append(o.parts, cable) - - } - } - - return nil -} - -type parts []part - -type part interface { - Name() string -} - -type valve struct { - Type string - ID string - Size float32 - Rating int -} - -func (v *valve) Name() string { - return fmt.Sprintf("VALVE: %s", v.ID) -} - -type pipe struct { - Type string - ID string - Length float32 - Diameter int -} - -func (p *pipe) Name() string { - return fmt.Sprintf("PIPE: %s", p.ID) -} - -type cable struct { - Type string - ID string - Length int - Rating float32 -} - -func (c *cable) Name() string { - return fmt.Sprintf("CABLE: %s", c.ID) -} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index b371f39..0000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/toml-lang/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. 
-*/ -package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index d905c21..0000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,568 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "toml: cannot encode array with mixed element types") - errArrayNilElement = errors.New( - "toml: cannot encode array with nil element") - errNonString = errors.New( - "toml: cannot encode a map with non-string key type") - errAnonNonStruct = errors.New( - "toml: cannot encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "toml: TOML array element cannot contain a table") - errNoKey = errors.New( - "toml: top-level values must be Go maps or structs") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. 
Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. 
-func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra newline between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. - continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexported fields - if f.PkgPath != "" && !f.Anonymous { - continue - } - frv := rv.Field(i) - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - // Treat anonymous struct fields with - // tag names as though they are not - // anonymous, like encoding/json does. - if getOptions(f.Tag).name == "" { - addFields(t, frv, f.Index) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && - getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), f.Index) - } - continue - } - // Fall through to the normal field encoding logic below - // for non-struct anonymous fields. 
- } - } - - if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. - continue - } - - opts := getOptions(sft.Tag) - if opts.skip { - continue - } - keyName := sft.Name - if opts.name != "" { - keyName = opts.name - } - if opts.omitempty && isEmpty(sf) { - continue - } - if opts.omitzero && isZero(sf) { - continue - } - - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } - return tomlArray - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. 
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -type tagOptions struct { - skip bool // "-" - name string - omitempty bool - omitzero bool -} - -func getOptions(tag reflect.StructTag) tagOptions { - t := tag.Get("toml") - if t == "-" { - return tagOptions{skip: true} - } - var opts tagOptions - parts := strings.Split(t, ",") - opts.name = parts[0] - for _, s := range parts[1:] { - switch s { - case "omitempty": - opts.omitempty = true - case "omitzero": - opts.omitzero = true - } - } - return opts -} - -func isZero(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rv.Uint() == 0 - case reflect.Float32, reflect.Float64: - return rv.Float() == 0.0 - } - return false -} - -func isEmpty(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return rv.Len() == 0 - case reflect.Bool: - return !rv.Bool() - } - return false -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. 
Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encode_test.go b/vendor/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 673b7b0..0000000 --- a/vendor/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,615 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - t.Logf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. -func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. 
- input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": { - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - 
struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "embedded non-struct": { - input: struct{ NonStruct }{5}, - wantOutput: "NonStruct = 5\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - []*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - { - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the 
River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func TestEncodeWithOmitEmpty(t *testing.T) { - type simple struct { - Bool bool `toml:"bool,omitempty"` - String string `toml:"string,omitempty"` - Array [0]byte `toml:"array,omitempty"` - Slice []int `toml:"slice,omitempty"` - Map map[string]string `toml:"map,omitempty"` - } - - var v simple - encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil) - v = simple{ - Bool: true, - String: " ", - Slice: []int{2, 3, 4}, - Map: map[string]string{"foo": "bar"}, - } - expected := `bool = true -string = " " -slice = [2, 3, 4] - -[map] - foo = "bar" -` - encodeExpected(t, "fields with omitempty are not omitted when non-empty", - v, expected, nil) -} - -func TestEncodeWithOmitZero(t *testing.T) { - type simple struct { - Number int `toml:"number,omitzero"` - Real float64 `toml:"real,omitzero"` - Unsigned uint `toml:"unsigned,omitzero"` - } - - value := simple{0, 0.0, uint(0)} - expected := "" - - encodeExpected(t, "simple with omitzero, all zero", value, expected, nil) - - value.Number = 10 - value.Real = 20 - value.Unsigned = 5 - expected = `number = 10 -real = 20.0 -unsigned = 5 -` - encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil) -} - -func TestEncodeOmitemptyWithEmptyName(t *testing.T) { - type simple struct { - S []int `toml:",omitempty"` - } - v := simple{[]int{1, 2, 3}} - expected := "S = [1, 2, 3]\n" - encodeExpected(t, "simple with omitempty, no name, non-empty field", - v, expected, nil) -} - -func TestEncodeAnonymousStruct(t *testing.T) { - type Inner struct{ N int } - type Outer0 struct{ Inner } - type Outer1 struct { - Inner `toml:"inner"` - } - - v0 := Outer0{Inner{3}} - expected := "N = 3\n" - encodeExpected(t, "embedded anonymous untagged struct", v0, expected, nil) - - v1 := Outer1{Inner{3}} - expected = "[inner]\n N = 3\n" - encodeExpected(t, "embedded anonymous tagged struct", v1, expected, nil) -} - -func TestEncodeAnonymousStructPointerField(t *testing.T) { - type Inner struct{ N int } - type Outer0 struct{ *Inner } - type Outer1 struct { - *Inner `toml:"inner"` - } - - v0 := Outer0{} - expected := "" - encodeExpected(t, "nil anonymous untagged struct pointer field", v0, expected, nil) - - v0 = Outer0{&Inner{3}} - expected = "N = 3\n" - encodeExpected(t, "non-nil anonymous untagged struct pointer field", v0, expected, nil) - - v1 := Outer1{} - expected = "" - encodeExpected(t, "nil anonymous tagged struct pointer field", v1, expected, nil) - - v1 = Outer1{&Inner{3}} - expected = "[inner]\n N = 3\n" - encodeExpected(t, "non-nil anonymous tagged struct pointer field", v1, expected, nil) -} - -func TestEncodeIgnoredFields(t *testing.T) { - type simple struct { - Number int `toml:"-"` - } - value := simple{} - 
expected := "" - encodeExpected(t, "ignored field", value, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. 
-type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 6dee7fc..0000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,953 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart - itemInlineTableStart - itemInlineTableEnd -) - -const ( - eof = 0 - comma = ',' - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' - inlineTableStart = '{' - inlineTableEnd = '}' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - - // Allow for backing up up to three runes. - // This is necessary because TOML contains 3-rune tokens (""" and '''). - prevWidths [3]int - nprev int // how many of prevWidths are in use - // If we emit an eof, we can still back up, but it is not OK to call - // next again. - atEOF bool - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. - stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input, - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.atEOF { - panic("next called after EOF") - } - if lx.pos >= len(lx.input) { - lx.atEOF = true - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - lx.prevWidths[2] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[0] - if lx.nprev < 3 { - lx.nprev++ - } - r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.prevWidths[0] = w - lx.pos += w - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. 
Can be called only twice between calls to next. -func (lx *lexer) backup() { - if lx.atEOF { - lx.atEOF = false - return - } - if lx.nprev < 1 { - panic("backed up too far") - } - w := lx.prevWidths[0] - lx.prevWidths[0] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[2] - lx.nprev-- - lx.pos -= w - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// skip ignores all input that matches the given predicate. -func (lx *lexer) skip(pred func(rune) bool) { - for { - r := lx.next() - if pred(r) { - continue - } - lx.backup() - lx.ignore() - return - } -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (newlines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("unexpected EOF") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a newline. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a newline for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.emit(itemEOF) - return nil - } - return lx.errorf("expected a top-level item to end with a newline, "+ - "comment, or EOF, but got %q instead", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. 
-func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("expected end of table array name delimiter %q, "+ - "but got %q instead", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("unexpected end of table name " + - "(table names cannot be empty)") - case r == tableSep: - return lx.errorf("unexpected table separator " + - "(table names cannot be empty)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - default: - return lexBareTableName - } -} - -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. -func lexBareTableName(lx *lexer) stateFn { - r := lx.next() - if isBareKeyChar(r) { - return lexBareTableName - } - lx.backup() - lx.emit(itemText) - return lexTableNameEnd -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == tableSep: - lx.ignore() - return lexTableNameStart - case r == tableEnd: - return lx.pop() - default: - return lx.errorf("expected '.' or ']' to end table name, "+ - "but got %q instead", r) - } -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("unexpected key separator %q", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing - default: - lx.ignore() - lx.emit(itemKeyStart) - return lexBareKey - } -} - -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. -func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd - default: - return lx.errorf("bare keys cannot contain %q", r) - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - default: - return lx.errorf("expected key separator %q, but got %q instead", - keySep, r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT newlines. 
- // In array syntax, the array states are responsible for ignoring newlines. - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - } - switch r { - case arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case inlineTableStart: - lx.ignore() - lx.emit(itemInlineTableStart) - return lexInlineTableValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case '+', '-': - return lexNumberStart - case '.': // special error case, be kind to users - return lx.errorf("floats must start with a digit, not '.'") - } - if unicode.IsLetter(r) { - // Be permissive here; lexBool will give a nice error if the - // user wrote something like - // x = foo - // (i.e. not 'true' or 'false' but is something else word-like.) - lx.backup() - return lexBool - } - return lx.errorf("expected value but found %q instead", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and newlines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == comma: - return lx.errorf("unexpected comma") - case r == arrayEnd: - // NOTE(caleb): The spec isn't clear about whether you can have - // a trailing comma or not, so we'll allow it. - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes everything between the end of an array value and -// the next value (or the end of the array): it ignores whitespace and newlines -// and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == comma: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf( - "expected a comma or array terminator %q, but got %q instead", - arrayEnd, r, - ) -} - -// lexArrayEnd finishes the lexing of an array. -// It assumes that a ']' has just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexInlineTableValue consumes one key/value pair in an inline table. -// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
-func lexInlineTableValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValue) - case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: - lx.push(lexInlineTableValue) - return lexCommentStart - case r == comma: - return lx.errorf("unexpected comma") - case r == inlineTableEnd: - return lexInlineTableEnd - } - lx.backup() - lx.push(lexInlineTableValueEnd) - return lexKeyStart -} - -// lexInlineTableValueEnd consumes everything between the end of an inline table -// key/value pair and the next pair (or the end of the table): -// it ignores whitespace and expects either a ',' or a '}'. -func lexInlineTableValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValueEnd) - case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: - lx.push(lexInlineTableValueEnd) - return lexCommentStart - case r == comma: - lx.ignore() - return lexInlineTableValue - case r == inlineTableEnd: - return lexInlineTableEnd - } - return lx.errorf("expected a comma or an inline table terminator %q, "+ - "but got %q instead", inlineTableEnd, r) -} - -// lexInlineTableEnd finishes the lexing of an inline table. -// It assumes that a '}' has just been consumed. -func lexInlineTableEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemInlineTableEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == eof: - return lx.errorf("unexpected EOF") - case isNL(r): - return lx.errorf("strings cannot contain newlines") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - switch lx.next() { - case eof: - return lx.errorf("unexpected EOF") - case '\\': - return lexMultilineStringEscape - case stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == eof: - return lx.errorf("unexpected EOF") - case isNL(r): - return lx.errorf("strings cannot contain newlines") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'''" has already been consumed and -// ignored. 
-func lexMultilineRawString(lx *lexer) stateFn { - switch lx.next() { - case eof: - return lx.errorf("unexpected EOF") - case rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - return lexMultilineString - } - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("invalid escape character %q; only the following "+ - "escape characters are allowed: "+ - `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf(`expected four hexadecimal digits after '\u', `+ - "but got %q instead", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf(`expected eight hexadecimal digits after '\U', `+ - "but got %q instead", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either an integer, a float, or datetime. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("floats must start with a digit, not '.'") - } - return lx.errorf("expected a digit but got %q", r) -} - -// lexNumberOrDate consumes either an integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '-': - return lexDatetime - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDatetime consumes a Datetime, to a first approximation. -// The parser validates that it matches one of the accepted formats. -func lexDatetime(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDatetime - } - switch r { - case '-', 'T', ':', '.', 'Z': - return lexDatetime - } - - lx.backup() - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that a sign -// has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // We MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("floats must start with a digit, not '.'") - } - return lx.errorf("expected a digit but got %q", r) - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. 
-func lexNumber(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumber - } - switch r { - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloat consumes the elements of a float. It allows any sequence of -// float-like characters, so floats emitted by the lexer are only a first -// approximation and must be validated by the parser. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - switch r { - case '_', '.', '-', '+', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexBool consumes a bool string: 'true' or 'false. -func lexBool(lx *lexer) stateFn { - var rs []rune - for { - r := lx.next() - if !unicode.IsLetter(r) { - lx.backup() - break - } - rs = append(rs, r) - } - s := string(rs) - switch s { - case "true", "false": - lx.emit(itemBool) - return lx.pop() - } - return lx.errorf("expected value but found %q instead", s) -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first newline character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. 
-func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index 50869ef..0000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,592 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } - } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' 
must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } - } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - case itemInlineTableStart: - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - - p.context = append(p.context, p.currentKey) - p.currentKey = "" - for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ != itemKeyStart { - p.bug("Expected key start but instead found %q, around line %d", - it.val, p.approxLine) - } - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - // retrieve key - k := p.next() - p.approxLine = k.line - kname := p.keyString(k) - - // retrieve value - p.currentKey = kname - val, typ := p.value(p.next()) - // make sure we keep metadata up to date - p.setType(kname, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - hash[kname] = val - } - p.context = outerContext - p.currentKey = outerKey - return hash, tomlHash - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// numUnderscoresOK checks whether each underscore in s is surrounded by -// characters that are not underscores. -func numUnderscoresOK(s string) bool { - accept := false - for _, r := range s { - if r == '_' { - if !accept { - return false - } - accept = false - continue - } - accept = true - } - return accept -} - -// numPeriodsOK checks whether every period in s is followed by a digit. -func numPeriodsOK(s string) bool { - period := false - for _, r := range s { - if period && !isDigit(r) { - return false - } - period = r == '.' - } - return !period -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. 
- if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). 
-func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. -func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) 
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - if !utf8.ValidRune(rune(hex)) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164b..0000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8af..0000000 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 608997c..0000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,242 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - opts := getOptions(sf.Tag) - if opts.skip { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := opts.name != "" - name := opts.name - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/gorilla/securecookie/.travis.yml b/vendor/github.com/gorilla/securecookie/.travis.yml deleted file mode 100644 index 24882fc..0000000 --- a/vendor/github.com/gorilla/securecookie/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.3 - - go: 1.4 - - go: 1.5 - - go: 1.6 - - go: tip - allow_failures: - - go: tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go vet $(go list ./... | grep -v /vendor/) - - go test -v -race ./... diff --git a/vendor/github.com/gorilla/securecookie/LICENSE b/vendor/github.com/gorilla/securecookie/LICENSE deleted file mode 100644 index 0e5fb87..0000000 --- a/vendor/github.com/gorilla/securecookie/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/securecookie/README.md b/vendor/github.com/gorilla/securecookie/README.md deleted file mode 100644 index 5ed299e..0000000 --- a/vendor/github.com/gorilla/securecookie/README.md +++ /dev/null @@ -1,76 +0,0 @@ -securecookie -============ -[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) [![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie) - -securecookie encodes and decodes authenticated and optionally encrypted -cookie values. - -Secure cookies can't be forged, because their values are validated using HMAC. -When encrypted, the content is also inaccessible to malicious eyes. It is still -recommended that sensitive data not be stored in cookies, and that HTTPS be used -to prevent cookie [replay attacks](https://en.wikipedia.org/wiki/Replay_attack). 
- -## Examples - -To use it, first create a new SecureCookie instance: - -```go -// Hash keys should be at least 32 bytes long -var hashKey = []byte("very-secret") -// Block keys should be 16 bytes (AES-128) or 32 bytes (AES-256) long. -// Shorter keys may weaken the encryption used. -var blockKey = []byte("a-lot-secret") -var s = securecookie.New(hashKey, blockKey) -``` - -The hashKey is required, used to authenticate the cookie value using HMAC. -It is recommended to use a key with 32 or 64 bytes. - -The blockKey is optional, used to encrypt the cookie value -- set it to nil -to not use encryption. If set, the length must correspond to the block size -of the encryption algorithm. For AES, used by default, valid lengths are -16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. - -Strong keys can be created using the convenience function GenerateRandomKey(). - -Once a SecureCookie instance is set, use it to encode a cookie value: - -```go -func SetCookieHandler(w http.ResponseWriter, r *http.Request) { - value := map[string]string{ - "foo": "bar", - } - if encoded, err := s.Encode("cookie-name", value); err == nil { - cookie := &http.Cookie{ - Name: "cookie-name", - Value: encoded, - Path: "/", - } - http.SetCookie(w, cookie) - } -} -``` - -Later, use the same SecureCookie instance to decode and validate a cookie -value: - -```go -func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { - if cookie, err := r.Cookie("cookie-name"); err == nil { - value := make(map[string]string) - if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { - fmt.Fprintf(w, "The value of foo is %q", value["foo"]) - } - } -} -``` - -We stored a map[string]string, but secure cookies can hold any value that -can be encoded using `encoding/gob`. To store custom types, they must be -registered first using gob.Register(). For basic types this is not needed; -it works out of the box. An optional JSON encoder that uses `encoding/json` is -available for types compatible with JSON. - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/securecookie/doc.go b/vendor/github.com/gorilla/securecookie/doc.go deleted file mode 100644 index ae89408..0000000 --- a/vendor/github.com/gorilla/securecookie/doc.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package securecookie encodes and decodes authenticated and optionally -encrypted cookie values. - -Secure cookies can't be forged, because their values are validated using HMAC. -When encrypted, the content is also inaccessible to malicious eyes. - -To use it, first create a new SecureCookie instance: - - var hashKey = []byte("very-secret") - var blockKey = []byte("a-lot-secret") - var s = securecookie.New(hashKey, blockKey) - -The hashKey is required, used to authenticate the cookie value using HMAC. -It is recommended to use a key with 32 or 64 bytes. - -The blockKey is optional, used to encrypt the cookie value -- set it to nil -to not use encryption. If set, the length must correspond to the block size -of the encryption algorithm. For AES, used by default, valid lengths are -16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. - -Strong keys can be created using the convenience function GenerateRandomKey(). 
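The deleted documentation above recommends creating keys with GenerateRandomKey() but does not show it in use. A minimal, illustrative sketch of that pattern follows; the key lengths (64 bytes for HMAC, 32 bytes for AES-256) follow the guidance quoted above and are not taken from this repository's own code.

```go
package main

import "github.com/gorilla/securecookie"

func main() {
	// Illustrative only: generate a 64-byte hash key (HMAC authentication)
	// and a 32-byte block key (AES-256 encryption) at startup.
	// GenerateRandomKey returns nil if the system RNG fails, so check both.
	hashKey := securecookie.GenerateRandomKey(64)
	blockKey := securecookie.GenerateRandomKey(32)
	if hashKey == nil || blockKey == nil {
		panic("failed to generate random keys")
	}

	s := securecookie.New(hashKey, blockKey)
	_ = s // use s.Encode / s.Decode as shown in the handlers below

	// Note: keys generated this way are not persisted; after a restart,
	// cookies issued with the previous keys will no longer decode.
}
```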
- -Once a SecureCookie instance is set, use it to encode a cookie value: - - func SetCookieHandler(w http.ResponseWriter, r *http.Request) { - value := map[string]string{ - "foo": "bar", - } - if encoded, err := s.Encode("cookie-name", value); err == nil { - cookie := &http.Cookie{ - Name: "cookie-name", - Value: encoded, - Path: "/", - } - http.SetCookie(w, cookie) - } - } - -Later, use the same SecureCookie instance to decode and validate a cookie -value: - - func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { - if cookie, err := r.Cookie("cookie-name"); err == nil { - value := make(map[string]string) - if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { - fmt.Fprintf(w, "The value of foo is %q", value["foo"]) - } - } - } - -We stored a map[string]string, but secure cookies can hold any value that -can be encoded using encoding/gob. To store custom types, they must be -registered first using gob.Register(). For basic types this is not needed; -it works out of the box. -*/ -package securecookie diff --git a/vendor/github.com/gorilla/securecookie/fuzz.go b/vendor/github.com/gorilla/securecookie/fuzz.go deleted file mode 100644 index e4d0534..0000000 --- a/vendor/github.com/gorilla/securecookie/fuzz.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build gofuzz - -package securecookie - -var hashKey = []byte("very-secret12345") -var blockKey = []byte("a-lot-secret1234") -var s = New(hashKey, blockKey) - -type Cookie struct { - B bool - I int - S string -} - -func Fuzz(data []byte) int { - datas := string(data) - var c Cookie - if err := s.Decode("fuzz", datas, &c); err != nil { - return 0 - } - if _, err := s.Encode("fuzz", c); err != nil { - panic(err) - } - return 1 -} diff --git a/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go b/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go deleted file mode 100644 index 368192b..0000000 --- a/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "fmt" - "io" - "math/rand" - "os" - "reflect" - "testing/quick" - - "github.com/gorilla/securecookie" -) - -var hashKey = []byte("very-secret12345") -var blockKey = []byte("a-lot-secret1234") -var s = securecookie.New(hashKey, blockKey) - -type Cookie struct { - B bool - I int - S string -} - -func main() { - var c Cookie - t := reflect.TypeOf(c) - rnd := rand.New(rand.NewSource(0)) - for i := 0; i < 100; i++ { - v, ok := quick.Value(t, rnd) - if !ok { - panic("couldn't generate value") - } - encoded, err := s.Encode("fuzz", v.Interface()) - if err != nil { - panic(err) - } - f, err := os.Create(fmt.Sprintf("corpus/%d.sc", i)) - if err != nil { - panic(err) - } - _, err = io.WriteString(f, encoded) - if err != nil { - panic(err) - } - f.Close() - } -} diff --git a/vendor/github.com/gorilla/securecookie/securecookie.go b/vendor/github.com/gorilla/securecookie/securecookie.go deleted file mode 100644 index 83dd606..0000000 --- a/vendor/github.com/gorilla/securecookie/securecookie.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package securecookie - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/rand" - "crypto/sha256" - "crypto/subtle" - "encoding/base64" - "encoding/gob" - "encoding/json" - "fmt" - "hash" - "io" - "strconv" - "strings" - "time" -) - -// Error is the interface of all errors returned by functions in this library. -type Error interface { - error - - // IsUsage returns true for errors indicating the client code probably - // uses this library incorrectly. For example, the client may have - // failed to provide a valid hash key, or may have failed to configure - // the Serializer adequately for encoding value. - IsUsage() bool - - // IsDecode returns true for errors indicating that a cookie could not - // be decoded and validated. Since cookies are usually untrusted - // user-provided input, errors of this type should be expected. - // Usually, the proper action is simply to reject the request. - IsDecode() bool - - // IsInternal returns true for unexpected errors occurring in the - // securecookie implementation. - IsInternal() bool - - // Cause, if it returns a non-nil value, indicates that this error was - // propagated from some underlying library. If this method returns nil, - // this error was raised directly by this library. - // - // Cause is provided principally for debugging/logging purposes; it is - // rare that application logic should perform meaningfully different - // logic based on Cause. See, for example, the caveats described on - // (MultiError).Cause(). - Cause() error -} - -// errorType is a bitmask giving the error type(s) of an cookieError value. -type errorType int - -const ( - usageError = errorType(1 << iota) - decodeError - internalError -) - -type cookieError struct { - typ errorType - msg string - cause error -} - -func (e cookieError) IsUsage() bool { return (e.typ & usageError) != 0 } -func (e cookieError) IsDecode() bool { return (e.typ & decodeError) != 0 } -func (e cookieError) IsInternal() bool { return (e.typ & internalError) != 0 } - -func (e cookieError) Cause() error { return e.cause } - -func (e cookieError) Error() string { - parts := []string{"securecookie: "} - if e.msg == "" { - parts = append(parts, "error") - } else { - parts = append(parts, e.msg) - } - if c := e.Cause(); c != nil { - parts = append(parts, " - caused by: ", c.Error()) - } - return strings.Join(parts, "") -} - -var ( - errGeneratingIV = cookieError{typ: internalError, msg: "failed to generate random iv"} - - errNoCodecs = cookieError{typ: usageError, msg: "no codecs provided"} - errHashKeyNotSet = cookieError{typ: usageError, msg: "hash key is not set"} - errBlockKeyNotSet = cookieError{typ: usageError, msg: "block key is not set"} - errEncodedValueTooLong = cookieError{typ: usageError, msg: "the value is too long"} - - errValueToDecodeTooLong = cookieError{typ: decodeError, msg: "the value is too long"} - errTimestampInvalid = cookieError{typ: decodeError, msg: "invalid timestamp"} - errTimestampTooNew = cookieError{typ: decodeError, msg: "timestamp is too new"} - errTimestampExpired = cookieError{typ: decodeError, msg: "expired timestamp"} - errDecryptionFailed = cookieError{typ: decodeError, msg: "the value could not be decrypted"} - errValueNotByte = cookieError{typ: decodeError, msg: "value not a []byte."} - - // ErrMacInvalid indicates that cookie decoding failed because the HMAC - // could not be extracted and verified. 
Direct use of this error - // variable is deprecated; it is public only for legacy compatibility, - // and may be privatized in the future, as it is rarely useful to - // distinguish between this error and other Error implementations. - ErrMacInvalid = cookieError{typ: decodeError, msg: "the value is not valid"} -) - -// Codec defines an interface to encode and decode cookie values. -type Codec interface { - Encode(name string, value interface{}) (string, error) - Decode(name, value string, dst interface{}) error -} - -// New returns a new SecureCookie. -// -// hashKey is required, used to authenticate values using HMAC. Create it using -// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes. -// -// blockKey is optional, used to encrypt values. Create it using -// GenerateRandomKey(). The key length must correspond to the block size -// of the encryption algorithm. For AES, used by default, valid lengths are -// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. -// The default encoder used for cookie serialization is encoding/gob. -// -// Note that keys created using GenerateRandomKey() are not automatically -// persisted. New keys will be created when the application is restarted, and -// previously issued cookies will not be able to be decoded. -func New(hashKey, blockKey []byte) *SecureCookie { - s := &SecureCookie{ - hashKey: hashKey, - blockKey: blockKey, - hashFunc: sha256.New, - maxAge: 86400 * 30, - maxLength: 4096, - sz: GobEncoder{}, - } - if hashKey == nil { - s.err = errHashKeyNotSet - } - if blockKey != nil { - s.BlockFunc(aes.NewCipher) - } - return s -} - -// SecureCookie encodes and decodes authenticated and optionally encrypted -// cookie values. -type SecureCookie struct { - hashKey []byte - hashFunc func() hash.Hash - blockKey []byte - block cipher.Block - maxLength int - maxAge int64 - minAge int64 - err error - sz Serializer - // For testing purposes, the function that returns the current timestamp. - // If not set, it will use time.Now().UTC().Unix(). - timeFunc func() int64 -} - -// Serializer provides an interface for providing custom serializers for cookie -// values. -type Serializer interface { - Serialize(src interface{}) ([]byte, error) - Deserialize(src []byte, dst interface{}) error -} - -// GobEncoder encodes cookie values using encoding/gob. This is the simplest -// encoder and can handle complex types via gob.Register. -type GobEncoder struct{} - -// JSONEncoder encodes cookie values using encoding/json. Users who wish to -// encode complex types need to satisfy the json.Marshaller and -// json.Unmarshaller interfaces. -type JSONEncoder struct{} - -// NopEncoder does not encode cookie values, and instead simply accepts a []byte -// (as an interface{}) and returns a []byte. This is particularly useful when -// you encoding an object upstream and do not wish to re-encode it. -type NopEncoder struct{} - -// MaxLength restricts the maximum length, in bytes, for the cookie value. -// -// Default is 4096, which is the maximum value accepted by Internet Explorer. -func (s *SecureCookie) MaxLength(value int) *SecureCookie { - s.maxLength = value - return s -} - -// MaxAge restricts the maximum age, in seconds, for the cookie value. -// -// Default is 86400 * 30. Set it to 0 for no restriction. -func (s *SecureCookie) MaxAge(value int) *SecureCookie { - s.maxAge = int64(value) - return s -} - -// MinAge restricts the minimum age, in seconds, for the cookie value. -// -// Default is 0 (no restriction). 
-func (s *SecureCookie) MinAge(value int) *SecureCookie { - s.minAge = int64(value) - return s -} - -// HashFunc sets the hash function used to create HMAC. -// -// Default is crypto/sha256.New. -func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie { - s.hashFunc = f - return s -} - -// BlockFunc sets the encryption function used to create a cipher.Block. -// -// Default is crypto/aes.New. -func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie { - if s.blockKey == nil { - s.err = errBlockKeyNotSet - } else if block, err := f(s.blockKey); err == nil { - s.block = block - } else { - s.err = cookieError{cause: err, typ: usageError} - } - return s -} - -// Encoding sets the encoding/serialization method for cookies. -// -// Default is encoding/gob. To encode special structures using encoding/gob, -// they must be registered first using gob.Register(). -func (s *SecureCookie) SetSerializer(sz Serializer) *SecureCookie { - s.sz = sz - - return s -} - -// Encode encodes a cookie value. -// -// It serializes, optionally encrypts, signs with a message authentication code, -// and finally encodes the value. -// -// The name argument is the cookie name. It is stored with the encoded value. -// The value argument is the value to be encoded. It can be any value that can -// be encoded using the currently selected serializer; see SetSerializer(). -// -// It is the client's responsibility to ensure that value, when encoded using -// the current serialization/encryption settings on s and then base64-encoded, -// is shorter than the maximum permissible length. -func (s *SecureCookie) Encode(name string, value interface{}) (string, error) { - if s.err != nil { - return "", s.err - } - if s.hashKey == nil { - s.err = errHashKeyNotSet - return "", s.err - } - var err error - var b []byte - // 1. Serialize. - if b, err = s.sz.Serialize(value); err != nil { - return "", cookieError{cause: err, typ: usageError} - } - // 2. Encrypt (optional). - if s.block != nil { - if b, err = encrypt(s.block, b); err != nil { - return "", cookieError{cause: err, typ: usageError} - } - } - b = encode(b) - // 3. Create MAC for "name|date|value". Extra pipe to be used later. - b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b)) - mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1]) - // Append mac, remove name. - b = append(b, mac...)[len(name)+1:] - // 4. Encode to base64. - b = encode(b) - // 5. Check length. - if s.maxLength != 0 && len(b) > s.maxLength { - return "", errEncodedValueTooLong - } - // Done. - return string(b), nil -} - -// Decode decodes a cookie value. -// -// It decodes, verifies a message authentication code, optionally decrypts and -// finally deserializes the value. -// -// The name argument is the cookie name. It must be the same name used when -// it was stored. The value argument is the encoded cookie value. The dst -// argument is where the cookie will be decoded. It must be a pointer. -func (s *SecureCookie) Decode(name, value string, dst interface{}) error { - if s.err != nil { - return s.err - } - if s.hashKey == nil { - s.err = errHashKeyNotSet - return s.err - } - // 1. Check length. - if s.maxLength != 0 && len(value) > s.maxLength { - return errValueToDecodeTooLong - } - // 2. Decode from base64. - b, err := decode([]byte(value)) - if err != nil { - return err - } - // 3. Verify MAC. Value is "date|value|mac". 
- parts := bytes.SplitN(b, []byte("|"), 3) - if len(parts) != 3 { - return ErrMacInvalid - } - h := hmac.New(s.hashFunc, s.hashKey) - b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...) - if err = verifyMac(h, b, parts[2]); err != nil { - return err - } - // 4. Verify date ranges. - var t1 int64 - if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil { - return errTimestampInvalid - } - t2 := s.timestamp() - if s.minAge != 0 && t1 > t2-s.minAge { - return errTimestampTooNew - } - if s.maxAge != 0 && t1 < t2-s.maxAge { - return errTimestampExpired - } - // 5. Decrypt (optional). - b, err = decode(parts[1]) - if err != nil { - return err - } - if s.block != nil { - if b, err = decrypt(s.block, b); err != nil { - return err - } - } - // 6. Deserialize. - if err = s.sz.Deserialize(b, dst); err != nil { - return cookieError{cause: err, typ: decodeError} - } - // Done. - return nil -} - -// timestamp returns the current timestamp, in seconds. -// -// For testing purposes, the function that generates the timestamp can be -// overridden. If not set, it will return time.Now().UTC().Unix(). -func (s *SecureCookie) timestamp() int64 { - if s.timeFunc == nil { - return time.Now().UTC().Unix() - } - return s.timeFunc() -} - -// Authentication ------------------------------------------------------------- - -// createMac creates a message authentication code (MAC). -func createMac(h hash.Hash, value []byte) []byte { - h.Write(value) - return h.Sum(nil) -} - -// verifyMac verifies that a message authentication code (MAC) is valid. -func verifyMac(h hash.Hash, value []byte, mac []byte) error { - mac2 := createMac(h, value) - // Check that both MACs are of equal length, as subtle.ConstantTimeCompare - // does not do this prior to Go 1.4. - if len(mac) == len(mac2) && subtle.ConstantTimeCompare(mac, mac2) == 1 { - return nil - } - return ErrMacInvalid -} - -// Encryption ----------------------------------------------------------------- - -// encrypt encrypts a value using the given block in counter mode. -// -// A random initialization vector (http://goo.gl/zF67k) with the length of the -// block size is prepended to the resulting ciphertext. -func encrypt(block cipher.Block, value []byte) ([]byte, error) { - iv := GenerateRandomKey(block.BlockSize()) - if iv == nil { - return nil, errGeneratingIV - } - // Encrypt it. - stream := cipher.NewCTR(block, iv) - stream.XORKeyStream(value, value) - // Return iv + ciphertext. - return append(iv, value...), nil -} - -// decrypt decrypts a value using the given block in counter mode. -// -// The value to be decrypted must be prepended by a initialization vector -// (http://goo.gl/zF67k) with the length of the block size. -func decrypt(block cipher.Block, value []byte) ([]byte, error) { - size := block.BlockSize() - if len(value) > size { - // Extract iv. - iv := value[:size] - // Extract ciphertext. - value = value[size:] - // Decrypt it. - stream := cipher.NewCTR(block, iv) - stream.XORKeyStream(value, value) - return value, nil - } - return nil, errDecryptionFailed -} - -// Serialization -------------------------------------------------------------- - -// Serialize encodes a value using gob. -func (e GobEncoder) Serialize(src interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - enc := gob.NewEncoder(buf) - if err := enc.Encode(src); err != nil { - return nil, cookieError{cause: err, typ: usageError} - } - return buf.Bytes(), nil -} - -// Deserialize decodes a value using gob. 
-func (e GobEncoder) Deserialize(src []byte, dst interface{}) error { - dec := gob.NewDecoder(bytes.NewBuffer(src)) - if err := dec.Decode(dst); err != nil { - return cookieError{cause: err, typ: decodeError} - } - return nil -} - -// Serialize encodes a value using encoding/json. -func (e JSONEncoder) Serialize(src interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - enc := json.NewEncoder(buf) - if err := enc.Encode(src); err != nil { - return nil, cookieError{cause: err, typ: usageError} - } - return buf.Bytes(), nil -} - -// Deserialize decodes a value using encoding/json. -func (e JSONEncoder) Deserialize(src []byte, dst interface{}) error { - dec := json.NewDecoder(bytes.NewReader(src)) - if err := dec.Decode(dst); err != nil { - return cookieError{cause: err, typ: decodeError} - } - return nil -} - -// Serialize passes a []byte through as-is. -func (e NopEncoder) Serialize(src interface{}) ([]byte, error) { - if b, ok := src.([]byte); ok { - return b, nil - } - - return nil, errValueNotByte -} - -// Deserialize passes a []byte through as-is. -func (e NopEncoder) Deserialize(src []byte, dst interface{}) error { - if _, ok := dst.([]byte); ok { - dst = src - return nil - } - - return errValueNotByte -} - -// Encoding ------------------------------------------------------------------- - -// encode encodes a value using base64. -func encode(value []byte) []byte { - encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) - base64.URLEncoding.Encode(encoded, value) - return encoded -} - -// decode decodes a cookie using base64. -func decode(value []byte) ([]byte, error) { - decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) - b, err := base64.URLEncoding.Decode(decoded, value) - if err != nil { - return nil, cookieError{cause: err, typ: decodeError, msg: "base64 decode failed"} - } - return decoded[:b], nil -} - -// Helpers -------------------------------------------------------------------- - -// GenerateRandomKey creates a random key with the given length in bytes. -// On failure, returns nil. -// -// Callers should explicitly check for the possibility of a nil return, treat -// it as a failure of the system random number generator, and not continue. -func GenerateRandomKey(length int) []byte { - k := make([]byte, length) - if _, err := io.ReadFull(rand.Reader, k); err != nil { - return nil - } - return k -} - -// CodecsFromPairs returns a slice of SecureCookie instances. -// -// It is a convenience function to create a list of codecs for key rotation. Note -// that the generated Codecs will have the default options applied: callers -// should iterate over each Codec and type-assert the underlying *SecureCookie to -// change these. -// -// Example: -// -// codecs := securecookie.CodecsFromPairs( -// []byte("new-hash-key"), -// []byte("new-block-key"), -// []byte("old-hash-key"), -// []byte("old-block-key"), -// ) -// -// // Modify each instance. -// for _, s := range codecs { -// if cookie, ok := s.(*securecookie.SecureCookie); ok { -// cookie.MaxAge(86400 * 7) -// cookie.SetSerializer(securecookie.JSONEncoder{}) -// cookie.HashFunc(sha512.New512_256) -// } -// } -// -func CodecsFromPairs(keyPairs ...[]byte) []Codec { - codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2) - for i := 0; i < len(keyPairs); i += 2 { - var blockKey []byte - if i+1 < len(keyPairs) { - blockKey = keyPairs[i+1] - } - codecs[i/2] = New(keyPairs[i], blockKey) - } - return codecs -} - -// EncodeMulti encodes a cookie value using a group of codecs. 
-// -// The codecs are tried in order. Multiple codecs are accepted to allow -// key rotation. -// -// On error, may return a MultiError. -func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) { - if len(codecs) == 0 { - return "", errNoCodecs - } - - var errors MultiError - for _, codec := range codecs { - encoded, err := codec.Encode(name, value) - if err == nil { - return encoded, nil - } - errors = append(errors, err) - } - return "", errors -} - -// DecodeMulti decodes a cookie value using a group of codecs. -// -// The codecs are tried in order. Multiple codecs are accepted to allow -// key rotation. -// -// On error, may return a MultiError. -func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error { - if len(codecs) == 0 { - return errNoCodecs - } - - var errors MultiError - for _, codec := range codecs { - err := codec.Decode(name, value, dst) - if err == nil { - return nil - } - errors = append(errors, err) - } - return errors -} - -// MultiError groups multiple errors. -type MultiError []error - -func (m MultiError) IsUsage() bool { return m.any(func(e Error) bool { return e.IsUsage() }) } -func (m MultiError) IsDecode() bool { return m.any(func(e Error) bool { return e.IsDecode() }) } -func (m MultiError) IsInternal() bool { return m.any(func(e Error) bool { return e.IsInternal() }) } - -// Cause returns nil for MultiError; there is no unique underlying cause in the -// general case. -// -// Note: we could conceivably return a non-nil Cause only when there is exactly -// one child error with a Cause. However, it would be brittle for client code -// to rely on the arity of causes inside a MultiError, so we have opted not to -// provide this functionality. Clients which really wish to access the Causes -// of the underlying errors are free to iterate through the errors themselves. -func (m MultiError) Cause() error { return nil } - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} - -// any returns true if any element of m is an Error for which pred returns true. -func (m MultiError) any(pred func(Error) bool) bool { - for _, e := range m { - if ourErr, ok := e.(Error); ok && pred(ourErr) { - return true - } - } - return false -} diff --git a/vendor/github.com/gorilla/securecookie/securecookie_test.go b/vendor/github.com/gorilla/securecookie/securecookie_test.go deleted file mode 100644 index 33ce4fc..0000000 --- a/vendor/github.com/gorilla/securecookie/securecookie_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securecookie - -import ( - "crypto/aes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "reflect" - "strings" - "testing" -) - -// Asserts that cookieError and MultiError are Error implementations. 
-var _ Error = cookieError{} -var _ Error = MultiError{} - -var testCookies = []interface{}{ - map[string]string{"foo": "bar"}, - map[string]string{"baz": "ding"}, -} - -var testStrings = []string{"foo", "bar", "baz"} - -func TestSecureCookie(t *testing.T) { - // TODO test too old / too new timestamps - s1 := New([]byte("12345"), []byte("1234567890123456")) - s2 := New([]byte("54321"), []byte("6543210987654321")) - value := map[string]interface{}{ - "foo": "bar", - "baz": 128, - } - - for i := 0; i < 50; i++ { - // Running this multiple times to check if any special character - // breaks encoding/decoding. - encoded, err1 := s1.Encode("sid", value) - if err1 != nil { - t.Error(err1) - continue - } - dst := make(map[string]interface{}) - err2 := s1.Decode("sid", encoded, &dst) - if err2 != nil { - t.Fatalf("%v: %v", err2, encoded) - } - if !reflect.DeepEqual(dst, value) { - t.Fatalf("Expected %v, got %v.", value, dst) - } - dst2 := make(map[string]interface{}) - err3 := s2.Decode("sid", encoded, &dst2) - if err3 == nil { - t.Fatalf("Expected failure decoding.") - } - err4, ok := err3.(Error) - if !ok { - t.Fatalf("Expected error to implement Error, got: %#v", err3) - } - if !err4.IsDecode() { - t.Fatalf("Expected DecodeError, got: %#v", err4) - } - - // Test other error type flags. - if err4.IsUsage() { - t.Fatalf("Expected IsUsage() == false, got: %#v", err4) - } - if err4.IsInternal() { - t.Fatalf("Expected IsInternal() == false, got: %#v", err4) - } - } -} - -func TestSecureCookieNilKey(t *testing.T) { - s1 := New(nil, nil) - value := map[string]interface{}{ - "foo": "bar", - "baz": 128, - } - _, err := s1.Encode("sid", value) - if err != errHashKeyNotSet { - t.Fatal("Wrong error returned:", err) - } -} - -func TestDecodeInvalid(t *testing.T) { - // List of invalid cookies, which must not be accepted, base64-decoded - // (they will be encoded before passing to Decode). 
- invalidCookies := []string{ - "", - " ", - "\n", - "||", - "|||", - "cookie", - } - s := New([]byte("12345"), nil) - var dst string - for i, v := range invalidCookies { - for _, enc := range []*base64.Encoding{ - base64.StdEncoding, - base64.URLEncoding, - } { - err := s.Decode("name", enc.EncodeToString([]byte(v)), &dst) - if err == nil { - t.Fatalf("%d: expected failure decoding", i) - } - err2, ok := err.(Error) - if !ok || !err2.IsDecode() { - t.Fatalf("%d: Expected IsDecode(), got: %#v", i, err) - } - } - } -} - -func TestAuthentication(t *testing.T) { - hash := hmac.New(sha256.New, []byte("secret-key")) - for _, value := range testStrings { - hash.Reset() - signed := createMac(hash, []byte(value)) - hash.Reset() - err := verifyMac(hash, []byte(value), signed) - if err != nil { - t.Error(err) - } - } -} - -func TestEncryption(t *testing.T) { - block, err := aes.NewCipher([]byte("1234567890123456")) - if err != nil { - t.Fatalf("Block could not be created") - } - var encrypted, decrypted []byte - for _, value := range testStrings { - if encrypted, err = encrypt(block, []byte(value)); err != nil { - t.Error(err) - } else { - if decrypted, err = decrypt(block, encrypted); err != nil { - t.Error(err) - } - if string(decrypted) != value { - t.Errorf("Expected %v, got %v.", value, string(decrypted)) - } - } - } -} - -func TestGobSerialization(t *testing.T) { - var ( - sz GobEncoder - serialized []byte - deserialized map[string]string - err error - ) - for _, value := range testCookies { - if serialized, err = sz.Serialize(value); err != nil { - t.Error(err) - } else { - deserialized = make(map[string]string) - if err = sz.Deserialize(serialized, &deserialized); err != nil { - t.Error(err) - } - if fmt.Sprintf("%v", deserialized) != fmt.Sprintf("%v", value) { - t.Errorf("Expected %v, got %v.", value, deserialized) - } - } - } -} - -func TestJSONSerialization(t *testing.T) { - var ( - sz JSONEncoder - serialized []byte - deserialized map[string]string - err error - ) - for _, value := range testCookies { - if serialized, err = sz.Serialize(value); err != nil { - t.Error(err) - } else { - deserialized = make(map[string]string) - if err = sz.Deserialize(serialized, &deserialized); err != nil { - t.Error(err) - } - if fmt.Sprintf("%v", deserialized) != fmt.Sprintf("%v", value) { - t.Errorf("Expected %v, got %v.", value, deserialized) - } - } - } -} - -func TestEncoding(t *testing.T) { - for _, value := range testStrings { - encoded := encode([]byte(value)) - decoded, err := decode(encoded) - if err != nil { - t.Error(err) - } else if string(decoded) != value { - t.Errorf("Expected %v, got %s.", value, string(decoded)) - } - } -} - -func TestMultiError(t *testing.T) { - s1, s2 := New(nil, nil), New(nil, nil) - _, err := EncodeMulti("sid", "value", s1, s2) - if len(err.(MultiError)) != 2 { - t.Errorf("Expected 2 errors, got %s.", err) - } else { - if strings.Index(err.Error(), "hash key is not set") == -1 { - t.Errorf("Expected missing hash key error, got %s.", err.Error()) - } - ourErr, ok := err.(Error) - if !ok || !ourErr.IsUsage() { - t.Fatalf("Expected error to be a usage error; got %#v", err) - } - if ourErr.IsDecode() { - t.Errorf("Expected error NOT to be a decode error; got %#v", ourErr) - } - if ourErr.IsInternal() { - t.Errorf("Expected error NOT to be an internal error; got %#v", ourErr) - } - } -} - -func TestMultiNoCodecs(t *testing.T) { - _, err := EncodeMulti("foo", "bar") - if err != errNoCodecs { - t.Errorf("EncodeMulti: bad value for error, got: %v", err) - } - - var dst 
[]byte - err = DecodeMulti("foo", "bar", &dst) - if err != errNoCodecs { - t.Errorf("DecodeMulti: bad value for error, got: %v", err) - } -} - -func TestMissingKey(t *testing.T) { - s1 := New(nil, nil) - - var dst []byte - err := s1.Decode("sid", "value", &dst) - if err != errHashKeyNotSet { - t.Fatalf("Expected %#v, got %#v", errHashKeyNotSet, err) - } - if err2, ok := err.(Error); !ok || !err2.IsUsage() { - t.Errorf("Expected missing hash key to be IsUsage(); was %#v", err) - } -} - -// ---------------------------------------------------------------------------- - -type FooBar struct { - Foo int - Bar string -} - -func TestCustomType(t *testing.T) { - s1 := New([]byte("12345"), []byte("1234567890123456")) - // Type is not registered in gob. (!!!) - src := &FooBar{42, "bar"} - encoded, _ := s1.Encode("sid", src) - - dst := &FooBar{} - _ = s1.Decode("sid", encoded, dst) - if dst.Foo != 42 || dst.Bar != "bar" { - t.Fatalf("Expected %#v, got %#v", src, dst) - } -} diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml deleted file mode 100644 index e7c4be0..0000000 --- a/vendor/github.com/jessevdk/go-flags/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -language: go - -go: - - 1.6.x - - 1.7.x - -install: - # go-flags - - go get -d -v ./... - - go build -v ./... - - # linting - - go get github.com/golang/lint - - go install github.com/golang/lint/golint - - # code coverage - - go get golang.org/x/tools/cmd/cover - - go get github.com/onsi/ginkgo/ginkgo - - go get github.com/modocache/gover - - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi - -script: - # go-flags - - $(exit $(gofmt -l . | wc -l)) - - go test -v ./... - - # linting - - go tool vet -all=true -v=true . || true - - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./... - - # code coverage - - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover - - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover - - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi - -env: - # coveralls.io - secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU=" diff --git a/vendor/github.com/jessevdk/go-flags/LICENSE b/vendor/github.com/jessevdk/go-flags/LICENSE deleted file mode 100644 index bcca0d5..0000000 --- a/vendor/github.com/jessevdk/go-flags/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2012 Jesse van den Kieboom. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md deleted file mode 100644 index 9378b76..0000000 --- a/vendor/github.com/jessevdk/go-flags/README.md +++ /dev/null @@ -1,135 +0,0 @@ -go-flags: a go library for parsing command line arguments -========================================================= - -[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master) - -This library provides similar functionality to the builtin flag library of -go, but provides much more functionality and nicer formatting. From the -documentation: - -Package flags provides an extensive command line option parser. -The flags package is similar in functionality to the go builtin flag package -but provides more options and uses reflection to provide a convenient and -succinct way of specifying command line options. - -Supported features: -* Options with short names (-v) -* Options with long names (--verbose) -* Options with and without arguments (bool v.s. other type) -* Options with optional arguments and default values -* Multiple option groups each containing a set of options -* Generate and print well-formatted help message -* Passing remaining command line arguments after -- (optional) -* Ignoring unknown command line options (optional) -* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification -* Supports multiple short options -aux -* Supports all primitive go types (string, int{8..64}, uint{8..64}, float) -* Supports same option multiple times (can store in slice or last option counts) -* Supports maps -* Supports function callbacks -* Supports namespaces for (nested) option groups - -The flags package uses structs, reflection and struct field tags -to allow users to specify command line options. This results in very simple -and concise specification of your application options. For example: - -```go -type Options struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` -} -``` - -This specifies one option with a short name -v and a long name --verbose. -When either -v or --verbose is found on the command line, a 'true' value -will be appended to the Verbose field. e.g. when specifying -vvv, the -resulting value of Verbose will be {[true, true, true]}. 
- -Example: --------- -```go -var opts struct { - // Slice of bool will append 'true' each time the option - // is encountered (can be set multiple times, like -vvv) - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - - // Example of automatic marshalling to desired type (uint) - Offset uint `long:"offset" description:"Offset"` - - // Example of a callback, called each time the option is found. - Call func(string) `short:"c" description:"Call phone number"` - - // Example of a required flag - Name string `short:"n" long:"name" description:"A name" required:"true"` - - // Example of a value name - File string `short:"f" long:"file" description:"A file" value-name:"FILE"` - - // Example of a pointer - Ptr *int `short:"p" description:"A pointer to an integer"` - - // Example of a slice of strings - StringSlice []string `short:"s" description:"A slice of strings"` - - // Example of a slice of pointers - PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` - - // Example of a map - IntMap map[string]int `long:"intmap" description:"A map from string to int"` -} - -// Callback which will invoke callto: to call a number. -// Note that this works just on OS X (and probably only with -// Skype) but it shows the idea. -opts.Call = func(num string) { - cmd := exec.Command("open", "callto:"+num) - cmd.Start() - cmd.Process.Release() -} - -// Make some fake arguments to parse. -args := []string{ - "-vv", - "--offset=5", - "-n", "Me", - "-p", "3", - "-s", "hello", - "-s", "world", - "--ptrslice", "hello", - "--ptrslice", "world", - "--intmap", "a:1", - "--intmap", "b:5", - "arg1", - "arg2", - "arg3", -} - -// Parse flags from `args'. Note that here we use flags.ParseArgs for -// the sake of making a working example. Normally, you would simply use -// flags.Parse(&opts) which uses os.Args -args, err := flags.ParseArgs(&opts, args) - -if err != nil { - panic(err) - os.Exit(1) -} - -fmt.Printf("Verbosity: %v\n", opts.Verbose) -fmt.Printf("Offset: %d\n", opts.Offset) -fmt.Printf("Name: %s\n", opts.Name) -fmt.Printf("Ptr: %d\n", *opts.Ptr) -fmt.Printf("StringSlice: %v\n", opts.StringSlice) -fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) -fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) -fmt.Printf("Remaining args: %s\n", strings.Join(args, " ")) - -// Output: Verbosity: [true true] -// Offset: 5 -// Name: Me -// Ptr: 3 -// StringSlice: [hello world] -// PtrSlice: [hello world] -// IntMap: [a:1 b:5] -// Remaining args: arg1 arg2 arg3 -``` - -More information can be found in the godocs: diff --git a/vendor/github.com/jessevdk/go-flags/arg.go b/vendor/github.com/jessevdk/go-flags/arg.go deleted file mode 100644 index 8ec6204..0000000 --- a/vendor/github.com/jessevdk/go-flags/arg.go +++ /dev/null @@ -1,27 +0,0 @@ -package flags - -import ( - "reflect" -) - -// Arg represents a positional argument on the command line. 
-type Arg struct { - // The name of the positional argument (used in the help) - Name string - - // A description of the positional argument (used in the help) - Description string - - // The minimal number of required positional arguments - Required int - - // The maximum number of required positional arguments - RequiredMaximum int - - value reflect.Value - tag multiTag -} - -func (a *Arg) isRemaining() bool { - return a.value.Type().Kind() == reflect.Slice -} diff --git a/vendor/github.com/jessevdk/go-flags/arg_test.go b/vendor/github.com/jessevdk/go-flags/arg_test.go deleted file mode 100644 index c7c0a61..0000000 --- a/vendor/github.com/jessevdk/go-flags/arg_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestPositional(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Command int - Filename string - Rest []string - } `positional-args:"yes" required:"yes"` - }{} - - p := NewParser(&opts, Default) - ret, err := p.ParseArgs([]string{"10", "arg_test.go", "a", "b"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - if opts.Positional.Command != 10 { - t.Fatalf("Expected opts.Positional.Command to be 10, but got %v", opts.Positional.Command) - } - - if opts.Positional.Filename != "arg_test.go" { - t.Fatalf("Expected opts.Positional.Filename to be \"arg_test.go\", but got %v", opts.Positional.Filename) - } - - assertStringArray(t, opts.Positional.Rest, []string{"a", "b"}) - assertStringArray(t, ret, []string{}) -} - -func TestPositionalRequired(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Command int - Filename string - Rest []string - } `positional-args:"yes" required:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"10"}) - - assertError(t, err, ErrRequired, "the required argument `Filename` was not provided") -} - -func TestPositionalRequiredRest1Fail(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Rest []string `required:"yes"` - } `positional-args:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{}) - - assertError(t, err, ErrRequired, "the required argument `Rest (at least 1 argument)` was not provided") -} - -func TestPositionalRequiredRest1Pass(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Rest []string `required:"yes"` - } `positional-args:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"rest1"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - if len(opts.Positional.Rest) != 1 { - t.Fatalf("Expected 1 positional rest argument") - } - - assertString(t, opts.Positional.Rest[0], "rest1") -} - -func TestPositionalRequiredRest2Fail(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Rest []string `required:"2"` - } `positional-args:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"rest1"}) - - assertError(t, err, ErrRequired, "the required argument `Rest (at least 2 arguments, but got only 1)` was not provided") -} - -func TestPositionalRequiredRest2Pass(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Rest []string `required:"2"` - } `positional-args:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - if 
len(opts.Positional.Rest) != 3 { - t.Fatalf("Expected 3 positional rest argument") - } - - assertString(t, opts.Positional.Rest[0], "rest1") - assertString(t, opts.Positional.Rest[1], "rest2") - assertString(t, opts.Positional.Rest[2], "rest3") -} - -func TestPositionalRequiredRestRangeFail(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Rest []string `required:"1-2"` - } `positional-args:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"}) - - assertError(t, err, ErrRequired, "the required argument `Rest (at most 2 arguments, but got 3)` was not provided") -} - -func TestPositionalRequiredRestRangeEmptyFail(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Positional struct { - Rest []string `required:"0-0"` - } `positional-args:"yes"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"some", "thing"}) - - assertError(t, err, ErrRequired, "the required argument `Rest (zero arguments)` was not provided") -} diff --git a/vendor/github.com/jessevdk/go-flags/assert_test.go b/vendor/github.com/jessevdk/go-flags/assert_test.go deleted file mode 100644 index 8e06636..0000000 --- a/vendor/github.com/jessevdk/go-flags/assert_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package flags - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "runtime" - "testing" -) - -func assertCallerInfo() (string, int) { - ptr := make([]uintptr, 15) - n := runtime.Callers(1, ptr) - - if n == 0 { - return "", 0 - } - - mef := runtime.FuncForPC(ptr[0]) - mefile, meline := mef.FileLine(ptr[0]) - - for i := 2; i < n; i++ { - f := runtime.FuncForPC(ptr[i]) - file, line := f.FileLine(ptr[i]) - - if file != mefile { - return file, line - } - } - - return mefile, meline -} - -func assertErrorf(t *testing.T, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - - file, line := assertCallerInfo() - - t.Errorf("%s:%d: %s", path.Base(file), line, msg) -} - -func assertFatalf(t *testing.T, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - - file, line := assertCallerInfo() - - t.Fatalf("%s:%d: %s", path.Base(file), line, msg) -} - -func assertString(t *testing.T, a string, b string) { - if a != b { - assertErrorf(t, "Expected %#v, but got %#v", b, a) - } -} - -func assertStringArray(t *testing.T, a []string, b []string) { - if len(a) != len(b) { - assertErrorf(t, "Expected %#v, but got %#v", b, a) - return - } - - for i, v := range a { - if b[i] != v { - assertErrorf(t, "Expected %#v, but got %#v", b, a) - return - } - } -} - -func assertBoolArray(t *testing.T, a []bool, b []bool) { - if len(a) != len(b) { - assertErrorf(t, "Expected %#v, but got %#v", b, a) - return - } - - for i, v := range a { - if b[i] != v { - assertErrorf(t, "Expected %#v, but got %#v", b, a) - return - } - } -} - -func assertParserSuccess(t *testing.T, data interface{}, args ...string) (*Parser, []string) { - parser := NewParser(data, Default&^PrintErrors) - ret, err := parser.ParseArgs(args) - - if err != nil { - t.Fatalf("Unexpected parse error: %s", err) - return nil, nil - } - - return parser, ret -} - -func assertParseSuccess(t *testing.T, data interface{}, args ...string) []string { - _, ret := assertParserSuccess(t, data, args...) 
- return ret -} - -func assertError(t *testing.T, err error, typ ErrorType, msg string) { - if err == nil { - assertFatalf(t, "Expected error: %s", msg) - return - } - - if e, ok := err.(*Error); !ok { - assertFatalf(t, "Expected Error type, but got %#v", err) - } else { - if e.Type != typ { - assertErrorf(t, "Expected error type {%s}, but got {%s}", typ, e.Type) - } - - if e.Message != msg { - assertErrorf(t, "Expected error message %#v, but got %#v", msg, e.Message) - } - } -} - -func assertParseFail(t *testing.T, typ ErrorType, msg string, data interface{}, args ...string) []string { - parser := NewParser(data, Default&^PrintErrors) - ret, err := parser.ParseArgs(args) - - assertError(t, err, typ, msg) - return ret -} - -func diff(a, b string) (string, error) { - atmp, err := ioutil.TempFile("", "help-diff") - - if err != nil { - return "", err - } - - btmp, err := ioutil.TempFile("", "help-diff") - - if err != nil { - return "", err - } - - if _, err := io.WriteString(atmp, a); err != nil { - return "", err - } - - if _, err := io.WriteString(btmp, b); err != nil { - return "", err - } - - ret, err := exec.Command("diff", "-u", "-d", "--label", "got", atmp.Name(), "--label", "expected", btmp.Name()).Output() - - os.Remove(atmp.Name()) - os.Remove(btmp.Name()) - - if err.Error() == "exit status 1" { - return string(ret), nil - } - - return string(ret), err -} - -func assertDiff(t *testing.T, actual, expected, msg string) { - if actual == expected { - return - } - - ret, err := diff(actual, expected) - - if err != nil { - assertErrorf(t, "Unexpected diff error: %s", err) - assertErrorf(t, "Unexpected %s, expected:\n\n%s\n\nbut got\n\n%s", msg, expected, actual) - } else { - assertErrorf(t, "Unexpected %s:\n\n%s", msg, ret) - } -} diff --git a/vendor/github.com/jessevdk/go-flags/closest.go b/vendor/github.com/jessevdk/go-flags/closest.go deleted file mode 100644 index 3b51875..0000000 --- a/vendor/github.com/jessevdk/go-flags/closest.go +++ /dev/null @@ -1,59 +0,0 @@ -package flags - -func levenshtein(s string, t string) int { - if len(s) == 0 { - return len(t) - } - - if len(t) == 0 { - return len(s) - } - - dists := make([][]int, len(s)+1) - for i := range dists { - dists[i] = make([]int, len(t)+1) - dists[i][0] = i - } - - for j := range t { - dists[0][j] = j - } - - for i, sc := range s { - for j, tc := range t { - if sc == tc { - dists[i+1][j+1] = dists[i][j] - } else { - dists[i+1][j+1] = dists[i][j] + 1 - if dists[i+1][j] < dists[i+1][j+1] { - dists[i+1][j+1] = dists[i+1][j] + 1 - } - if dists[i][j+1] < dists[i+1][j+1] { - dists[i+1][j+1] = dists[i][j+1] + 1 - } - } - } - } - - return dists[len(s)][len(t)] -} - -func closestChoice(cmd string, choices []string) (string, int) { - if len(choices) == 0 { - return "", 0 - } - - mincmd := -1 - mindist := -1 - - for i, c := range choices { - l := levenshtein(cmd, c) - - if mincmd < 0 || l < mindist { - mindist = l - mincmd = i - } - } - - return choices[mincmd], mindist -} diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go deleted file mode 100644 index 2662843..0000000 --- a/vendor/github.com/jessevdk/go-flags/command.go +++ /dev/null @@ -1,455 +0,0 @@ -package flags - -import ( - "reflect" - "sort" - "strconv" - "strings" - "unsafe" -) - -// Command represents an application command. Commands can be added to the -// parser (which itself is a command) and are selected/executed when its name -// is specified on the command line. 
The Command type embeds a Group and -// therefore also carries a set of command specific options. -type Command struct { - // Embedded, see Group for more information - *Group - - // The name by which the command can be invoked - Name string - - // The active sub command (set by parsing) or nil - Active *Command - - // Whether subcommands are optional - SubcommandsOptional bool - - // Aliases for the command - Aliases []string - - // Whether positional arguments are required - ArgsRequired bool - - commands []*Command - hasBuiltinHelpGroup bool - args []*Arg -} - -// Commander is an interface which can be implemented by any command added in -// the options. When implemented, the Execute method will be called for the last -// specified (sub)command providing the remaining command line arguments. -type Commander interface { - // Execute will be called for the last active (sub)command. The - // args argument contains the remaining command line arguments. The - // error that Execute returns will be eventually passed out of the - // Parse method of the Parser. - Execute(args []string) error -} - -// Usage is an interface which can be implemented to show a custom usage string -// in the help message shown for a command. -type Usage interface { - // Usage is called for commands to allow customized printing of command - // usage in the generated help message. - Usage() string -} - -type lookup struct { - shortNames map[string]*Option - longNames map[string]*Option - - commands map[string]*Command -} - -// AddCommand adds a new command to the parser with the given name and data. The -// data needs to be a pointer to a struct from which the fields indicate which -// options are in the command. The provided data can implement the Command and -// Usage interfaces. -func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) { - cmd := newCommand(command, shortDescription, longDescription, data) - - cmd.parent = c - - if err := cmd.scan(); err != nil { - return nil, err - } - - c.commands = append(c.commands, cmd) - return cmd, nil -} - -// AddGroup adds a new group to the command with the given name and data. The -// data needs to be a pointer to a struct from which the fields indicate which -// options are in the group. -func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { - group := newGroup(shortDescription, longDescription, data) - - group.parent = c - - if err := group.scanType(c.scanSubcommandHandler(group)); err != nil { - return nil, err - } - - c.groups = append(c.groups, group) - return group, nil -} - -// Commands returns a list of subcommands of this command. -func (c *Command) Commands() []*Command { - return c.commands -} - -// Find locates the subcommand with the given name and returns it. If no such -// command can be found Find will return nil. -func (c *Command) Find(name string) *Command { - for _, cc := range c.commands { - if cc.match(name) { - return cc - } - } - - return nil -} - -// FindOptionByLongName finds an option that is part of the command, or any of -// its parent commands, by matching its long name (including the option -// namespace). 
-func (c *Command) FindOptionByLongName(longName string) (option *Option) { - for option == nil && c != nil { - option = c.Group.FindOptionByLongName(longName) - - c, _ = c.parent.(*Command) - } - - return option -} - -// FindOptionByShortName finds an option that is part of the command, or any of -// its parent commands, by matching its long name (including the option -// namespace). -func (c *Command) FindOptionByShortName(shortName rune) (option *Option) { - for option == nil && c != nil { - option = c.Group.FindOptionByShortName(shortName) - - c, _ = c.parent.(*Command) - } - - return option -} - -// Args returns a list of positional arguments associated with this command. -func (c *Command) Args() []*Arg { - ret := make([]*Arg, len(c.args)) - copy(ret, c.args) - - return ret -} - -func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command { - return &Command{ - Group: newGroup(shortDescription, longDescription, data), - Name: name, - } -} - -func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler { - f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) { - mtag := newMultiTag(string(sfield.Tag)) - - if err := mtag.Parse(); err != nil { - return true, err - } - - positional := mtag.Get("positional-args") - - if len(positional) != 0 { - stype := realval.Type() - - for i := 0; i < stype.NumField(); i++ { - field := stype.Field(i) - - m := newMultiTag((string(field.Tag))) - - if err := m.Parse(); err != nil { - return true, err - } - - name := m.Get("positional-arg-name") - - if len(name) == 0 { - name = field.Name - } - - required := -1 - requiredMaximum := -1 - - sreq := m.Get("required") - - if sreq != "" { - required = 1 - - rng := strings.SplitN(sreq, "-", 2) - - if len(rng) > 1 { - if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil { - required = int(preq) - } - - if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil { - requiredMaximum = int(preq) - } - } else { - if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil { - required = int(preq) - } - } - } - - arg := &Arg{ - Name: name, - Description: m.Get("description"), - Required: required, - RequiredMaximum: requiredMaximum, - - value: realval.Field(i), - tag: m, - } - - c.args = append(c.args, arg) - - if len(mtag.Get("required")) != 0 { - c.ArgsRequired = true - } - } - - return true, nil - } - - subcommand := mtag.Get("command") - - if len(subcommand) != 0 { - ptrval := reflect.NewAt(realval.Type(), unsafe.Pointer(realval.UnsafeAddr())) - - shortDescription := mtag.Get("description") - longDescription := mtag.Get("long-description") - subcommandsOptional := mtag.Get("subcommands-optional") - aliases := mtag.GetMany("alias") - - subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface()) - if err != nil { - return true, err - } - - subc.Hidden = mtag.Get("hidden") != "" - - if len(subcommandsOptional) > 0 { - subc.SubcommandsOptional = true - } - - if len(aliases) > 0 { - subc.Aliases = aliases - } - - return true, nil - } - - return parentg.scanSubGroupHandler(realval, sfield) - } - - return f -} - -func (c *Command) scan() error { - return c.scanType(c.scanSubcommandHandler(c.Group)) -} - -func (c *Command) eachOption(f func(*Command, *Group, *Option)) { - c.eachCommand(func(c *Command) { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - f(c, g, option) - } - }) - }, true) -} - -func (c *Command) eachCommand(f func(*Command), recurse bool) { - f(c) - - for _, cc := range 
c.commands { - if recurse { - cc.eachCommand(f, true) - } else { - f(cc) - } - } -} - -func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) { - c.eachGroup(func(g *Group) { - f(c, g) - }) - - if c.Active != nil { - c.Active.eachActiveGroup(f) - } -} - -func (c *Command) addHelpGroups(showHelp func() error) { - if !c.hasBuiltinHelpGroup { - c.addHelpGroup(showHelp) - c.hasBuiltinHelpGroup = true - } - - for _, cc := range c.commands { - cc.addHelpGroups(showHelp) - } -} - -func (c *Command) makeLookup() lookup { - ret := lookup{ - shortNames: make(map[string]*Option), - longNames: make(map[string]*Option), - commands: make(map[string]*Command), - } - - parent := c.parent - - var parents []*Command - - for parent != nil { - if cmd, ok := parent.(*Command); ok { - parents = append(parents, cmd) - parent = cmd.parent - } else { - parent = nil - } - } - - for i := len(parents) - 1; i >= 0; i-- { - parents[i].fillLookup(&ret, true) - } - - c.fillLookup(&ret, false) - return ret -} - -func (c *Command) fillLookup(ret *lookup, onlyOptions bool) { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - if option.ShortName != 0 { - ret.shortNames[string(option.ShortName)] = option - } - - if len(option.LongName) > 0 { - ret.longNames[option.LongNameWithNamespace()] = option - } - } - }) - - if onlyOptions { - return - } - - for _, subcommand := range c.commands { - ret.commands[subcommand.Name] = subcommand - - for _, a := range subcommand.Aliases { - ret.commands[a] = subcommand - } - } -} - -func (c *Command) groupByName(name string) *Group { - if grp := c.Group.groupByName(name); grp != nil { - return grp - } - - for _, subc := range c.commands { - prefix := subc.Name + "." - - if strings.HasPrefix(name, prefix) { - if grp := subc.groupByName(name[len(prefix):]); grp != nil { - return grp - } - } else if name == subc.Name { - return subc.Group - } - } - - return nil -} - -type commandList []*Command - -func (c commandList) Less(i, j int) bool { - return c[i].Name < c[j].Name -} - -func (c commandList) Len() int { - return len(c) -} - -func (c commandList) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -func (c *Command) sortedVisibleCommands() []*Command { - ret := commandList(c.visibleCommands()) - sort.Sort(ret) - - return []*Command(ret) -} - -func (c *Command) visibleCommands() []*Command { - ret := make([]*Command, 0, len(c.commands)) - - for _, cmd := range c.commands { - if !cmd.Hidden { - ret = append(ret, cmd) - } - } - - return ret -} - -func (c *Command) match(name string) bool { - if c.Name == name { - return true - } - - for _, v := range c.Aliases { - if v == name { - return true - } - } - - return false -} - -func (c *Command) hasCliOptions() bool { - ret := false - - c.eachGroup(func(g *Group) { - if g.isBuiltinHelp { - return - } - - for _, opt := range g.options { - if opt.canCli() { - ret = true - } - } - }) - - return ret -} - -func (c *Command) fillParseState(s *parseState) { - s.positional = make([]*Arg, len(c.args)) - copy(s.positional, c.args) - - s.lookup = c.makeLookup() - s.command = c -} diff --git a/vendor/github.com/jessevdk/go-flags/command_test.go b/vendor/github.com/jessevdk/go-flags/command_test.go deleted file mode 100644 index dc04b66..0000000 --- a/vendor/github.com/jessevdk/go-flags/command_test.go +++ /dev/null @@ -1,582 +0,0 @@ -package flags - -import ( - "fmt" - "testing" -) - -func TestCommandInline(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - G bool `short:"g"` - } `command:"cmd"` - }{} 
- - p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g") - - assertStringArray(t, ret, []string{}) - - if p.Active == nil { - t.Errorf("Expected active command") - } - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Command.G { - t.Errorf("Expected Command.G to be true") - } - - if p.Command.Find("cmd") != p.Active { - t.Errorf("Expected to find command `cmd' to be active") - } -} - -func TestCommandInlineMulti(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - C1 struct { - } `command:"c1"` - - C2 struct { - G bool `short:"g"` - } `command:"c2"` - }{} - - p, ret := assertParserSuccess(t, &opts, "-v", "c2", "-g") - - assertStringArray(t, ret, []string{}) - - if p.Active == nil { - t.Errorf("Expected active command") - } - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.C2.G { - t.Errorf("Expected C2.G to be true") - } - - if p.Command.Find("c1") == nil { - t.Errorf("Expected to find command `c1'") - } - - if c2 := p.Command.Find("c2"); c2 == nil { - t.Errorf("Expected to find command `c2'") - } else if c2 != p.Active { - t.Errorf("Expected to find command `c2' to be active") - } -} - -func TestCommandFlagOrder1(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - G bool `short:"g"` - } `command:"cmd"` - }{} - - assertParseFail(t, ErrUnknownFlag, "unknown flag `g'", &opts, "-v", "-g", "cmd") -} - -func TestCommandFlagOrder2(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - G bool `short:"g"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "cmd", "-v", "-g") - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Command.G { - t.Errorf("Expected Command.G to be true") - } -} - -func TestCommandFlagOrderSub(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - G bool `short:"g"` - - SubCommand struct { - B bool `short:"b"` - } `command:"sub"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "cmd", "sub", "-v", "-g", "-b") - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Command.G { - t.Errorf("Expected Command.G to be true") - } - - if !opts.Command.SubCommand.B { - t.Errorf("Expected Command.SubCommand.B to be true") - } -} - -func TestCommandFlagOverride1(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - Value bool `short:"v"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "-v", "cmd") - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if opts.Command.Value { - t.Errorf("Expected Command.Value to be false") - } -} - -func TestCommandFlagOverride2(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - Value bool `short:"v"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "cmd", "-v") - - if opts.Value { - t.Errorf("Expected Value to be false") - } - - if !opts.Command.Value { - t.Errorf("Expected Command.Value to be true") - } -} - -func TestCommandFlagOverrideSub(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - Value bool `short:"v"` - - SubCommand struct { - Value bool `short:"v"` - } `command:"sub"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "cmd", "sub", "-v") - - if opts.Value { - t.Errorf("Expected Value to be false") - } - - if opts.Command.Value { - t.Errorf("Expected Command.Value to be false") - } - - if !opts.Command.SubCommand.Value { - t.Errorf("Expected Command.Value to be 
true") - } -} - -func TestCommandFlagOverrideSub2(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - Value bool `short:"v"` - - SubCommand struct { - G bool `short:"g"` - } `command:"sub"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "cmd", "sub", "-v") - - if opts.Value { - t.Errorf("Expected Value to be false") - } - - if !opts.Command.Value { - t.Errorf("Expected Command.Value to be true") - } -} - -func TestCommandEstimate(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Cmd1 struct { - } `command:"remove"` - - Cmd2 struct { - } `command:"add"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{}) - - assertError(t, err, ErrCommandRequired, "Please specify one command of: add or remove") -} - -func TestCommandEstimate2(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Cmd1 struct { - } `command:"remove"` - - Cmd2 struct { - } `command:"add"` - }{} - - p := NewParser(&opts, None) - _, err := p.ParseArgs([]string{"rmive"}) - - assertError(t, err, ErrUnknownCommand, "Unknown command `rmive', did you mean `remove'?") -} - -type testCommand struct { - G bool `short:"g"` - Executed bool - EArgs []string -} - -func (c *testCommand) Execute(args []string) error { - c.Executed = true - c.EArgs = args - - return nil -} - -func TestCommandExecute(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command testCommand `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "-v", "cmd", "-g", "a", "b") - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Command.Executed { - t.Errorf("Did not execute command") - } - - if !opts.Command.G { - t.Errorf("Expected Command.C to be true") - } - - assertStringArray(t, opts.Command.EArgs, []string{"a", "b"}) -} - -func TestCommandClosest(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Cmd1 struct { - } `command:"remove"` - - Cmd2 struct { - } `command:"add"` - }{} - - args := assertParseFail(t, ErrUnknownCommand, "Unknown command `addd', did you mean `add'?", &opts, "-v", "addd") - - assertStringArray(t, args, []string{"addd"}) -} - -func TestCommandAdd(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - }{} - - var cmd = struct { - G bool `short:"g"` - }{} - - p := NewParser(&opts, Default) - c, err := p.AddCommand("cmd", "", "", &cmd) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - ret, err := p.ParseArgs([]string{"-v", "cmd", "-g", "rest"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - assertStringArray(t, ret, []string{"rest"}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !cmd.G { - t.Errorf("Expected Command.G to be true") - } - - if p.Command.Find("cmd") != c { - t.Errorf("Expected to find command `cmd'") - } - - if p.Commands()[0] != c { - t.Errorf("Expected command %#v, but got %#v", c, p.Commands()[0]) - } - - if c.Options()[0].ShortName != 'g' { - t.Errorf("Expected short name `g' but got %v", c.Options()[0].ShortName) - } -} - -func TestCommandNestedInline(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command struct { - G bool `short:"g"` - - Nested struct { - N string `long:"n"` - } `command:"nested"` - } `command:"cmd"` - }{} - - p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g", "nested", "--n", "n", "rest") - - assertStringArray(t, ret, []string{"rest"}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Command.G { - 
t.Errorf("Expected Command.G to be true") - } - - assertString(t, opts.Command.Nested.N, "n") - - if c := p.Command.Find("cmd"); c == nil { - t.Errorf("Expected to find command `cmd'") - } else { - if c != p.Active { - t.Errorf("Expected `cmd' to be the active parser command") - } - - if nested := c.Find("nested"); nested == nil { - t.Errorf("Expected to find command `nested'") - } else if nested != c.Active { - t.Errorf("Expected to find command `nested' to be the active `cmd' command") - } - } -} - -func TestRequiredOnCommand(t *testing.T) { - var opts = struct { - Value bool `short:"v" required:"true"` - - Command struct { - G bool `short:"g"` - } `command:"cmd"` - }{} - - assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts, "cmd") -} - -func TestRequiredAllOnCommand(t *testing.T) { - var opts = struct { - Value bool `short:"v" required:"true"` - Missing bool `long:"missing" required:"true"` - - Command struct { - G bool `short:"g"` - } `command:"cmd"` - }{} - - assertParseFail(t, ErrRequired, fmt.Sprintf("the required flags `%smissing' and `%cv' were not specified", defaultLongOptDelimiter, defaultShortOptDelimiter), &opts, "cmd") -} - -func TestDefaultOnCommand(t *testing.T) { - var opts = struct { - Command struct { - G string `short:"g" default:"value"` - } `command:"cmd"` - }{} - - assertParseSuccess(t, &opts, "cmd") - - if opts.Command.G != "value" { - t.Errorf("Expected G to be \"value\"") - } -} - -func TestAfterNonCommand(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Cmd1 struct { - } `command:"remove"` - - Cmd2 struct { - } `command:"add"` - }{} - - assertParseFail(t, ErrUnknownCommand, "Unknown command `nocmd'. Please specify one command of: add or remove", &opts, "nocmd", "remove") -} - -func TestSubcommandsOptional(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Cmd1 struct { - } `command:"remove"` - - Cmd2 struct { - } `command:"add"` - }{} - - p := NewParser(&opts, None) - p.SubcommandsOptional = true - - _, err := p.ParseArgs([]string{"-v"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - if !opts.Value { - t.Errorf("Expected Value to be true") - } -} - -func TestSubcommandsOptionalAfterNonCommand(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Cmd1 struct { - } `command:"remove"` - - Cmd2 struct { - } `command:"add"` - }{} - - p := NewParser(&opts, None) - p.SubcommandsOptional = true - - retargs, err := p.ParseArgs([]string{"nocmd", "remove"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - assertStringArray(t, retargs, []string{"nocmd", "remove"}) -} - -func TestCommandAlias(t *testing.T) { - var opts = struct { - Command struct { - G string `short:"g" default:"value"` - } `command:"cmd" alias:"cm"` - }{} - - assertParseSuccess(t, &opts, "cm") - - if opts.Command.G != "value" { - t.Errorf("Expected G to be \"value\"") - } -} - -func TestSubCommandFindOptionByLongFlag(t *testing.T) { - var opts struct { - Testing bool `long:"testing" description:"Testing"` - } - - var cmd struct { - Other bool `long:"other" description:"Other"` - } - - p := NewParser(&opts, Default) - c, _ := p.AddCommand("command", "Short", "Long", &cmd) - - opt := c.FindOptionByLongName("other") - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - assertString(t, opt.LongName, "other") - - opt = c.FindOptionByLongName("testing") - - if opt == nil { - t.Errorf("Expected option, but found none") - 
} - - assertString(t, opt.LongName, "testing") -} - -func TestSubCommandFindOptionByShortFlag(t *testing.T) { - var opts struct { - Testing bool `short:"t" description:"Testing"` - } - - var cmd struct { - Other bool `short:"o" description:"Other"` - } - - p := NewParser(&opts, Default) - c, _ := p.AddCommand("command", "Short", "Long", &cmd) - - opt := c.FindOptionByShortName('o') - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - if opt.ShortName != 'o' { - t.Errorf("Expected 'o', but got %v", opt.ShortName) - } - - opt = c.FindOptionByShortName('t') - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - if opt.ShortName != 't' { - t.Errorf("Expected 'o', but got %v", opt.ShortName) - } -} diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go deleted file mode 100644 index 7a7a08b..0000000 --- a/vendor/github.com/jessevdk/go-flags/completion.go +++ /dev/null @@ -1,309 +0,0 @@ -package flags - -import ( - "fmt" - "path/filepath" - "reflect" - "sort" - "strings" - "unicode/utf8" -) - -// Completion is a type containing information of a completion. -type Completion struct { - // The completed item - Item string - - // A description of the completed item (optional) - Description string -} - -type completions []Completion - -func (c completions) Len() int { - return len(c) -} - -func (c completions) Less(i, j int) bool { - return c[i].Item < c[j].Item -} - -func (c completions) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -// Completer is an interface which can be implemented by types -// to provide custom command line argument completion. -type Completer interface { - // Complete receives a prefix representing a (partial) value - // for its type and should provide a list of possible valid - // completions. - Complete(match string) []Completion -} - -type completion struct { - parser *Parser -} - -// Filename is a string alias which provides filename completion. -type Filename string - -func completionsWithoutDescriptions(items []string) []Completion { - ret := make([]Completion, len(items)) - - for i, v := range items { - ret[i].Item = v - } - - return ret -} - -// Complete returns a list of existing files with the given -// prefix. 
-func (f *Filename) Complete(match string) []Completion { - ret, _ := filepath.Glob(match + "*") - return completionsWithoutDescriptions(ret) -} - -func (c *completion) skipPositional(s *parseState, n int) { - if n >= len(s.positional) { - s.positional = nil - } else { - s.positional = s.positional[n:] - } -} - -func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion { - if short && len(match) != 0 { - return []Completion{ - Completion{ - Item: prefix + match, - }, - } - } - - var results []Completion - repeats := map[string]bool{} - - for name, opt := range s.lookup.longNames { - if strings.HasPrefix(name, match) && !opt.Hidden { - results = append(results, Completion{ - Item: defaultLongOptDelimiter + name, - Description: opt.Description, - }) - - if short { - repeats[string(opt.ShortName)] = true - } - } - } - - if short { - for name, opt := range s.lookup.shortNames { - if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden { - results = append(results, Completion{ - Item: string(defaultShortOptDelimiter) + name, - Description: opt.Description, - }) - } - } - } - - return results -} - -func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion { - return c.completeOptionNames(s, prefix, match, false) -} - -func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion { - return c.completeOptionNames(s, prefix, match, true) -} - -func (c *completion) completeCommands(s *parseState, match string) []Completion { - n := make([]Completion, 0, len(s.command.commands)) - - for _, cmd := range s.command.commands { - if cmd.data != c && strings.HasPrefix(cmd.Name, match) { - n = append(n, Completion{ - Item: cmd.Name, - Description: cmd.ShortDescription, - }) - } - } - - return n -} - -func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion { - if value.Kind() == reflect.Slice { - value = reflect.New(value.Type().Elem()) - } - i := value.Interface() - - var ret []Completion - - if cmp, ok := i.(Completer); ok { - ret = cmp.Complete(match) - } else if value.CanAddr() { - if cmp, ok = value.Addr().Interface().(Completer); ok { - ret = cmp.Complete(match) - } - } - - for i, v := range ret { - ret[i].Item = prefix + v.Item - } - - return ret -} - -func (c *completion) complete(args []string) []Completion { - if len(args) == 0 { - args = []string{""} - } - - s := &parseState{ - args: args, - } - - c.parser.fillParseState(s) - - var opt *Option - - for len(s.args) > 1 { - arg := s.pop() - - if (c.parser.Options&PassDoubleDash) != None && arg == "--" { - opt = nil - c.skipPositional(s, len(s.args)-1) - - break - } - - if argumentIsOption(arg) { - prefix, optname, islong := stripOptionPrefix(arg) - optname, _, argument := splitOption(prefix, optname, islong) - - if argument == nil { - var o *Option - canarg := true - - if islong { - o = s.lookup.longNames[optname] - } else { - for i, r := range optname { - sname := string(r) - o = s.lookup.shortNames[sname] - - if o == nil { - break - } - - if i == 0 && o.canArgument() && len(optname) != len(sname) { - canarg = false - break - } - } - } - - if o == nil && (c.parser.Options&PassAfterNonOption) != None { - opt = nil - c.skipPositional(s, len(s.args)-1) - - break - } else if o != nil && o.canArgument() && !o.OptionalArgument && canarg { - if len(s.args) > 1 { - s.pop() - } else { - opt = o - } - } - } - } else { - if len(s.positional) > 0 { - 
if !s.positional[0].isRemaining() { - // Don't advance beyond a remaining positional arg (because - // it consumes all subsequent args). - s.positional = s.positional[1:] - } - } else if cmd, ok := s.lookup.commands[arg]; ok { - cmd.fillParseState(s) - } - - opt = nil - } - } - - lastarg := s.args[len(s.args)-1] - var ret []Completion - - if opt != nil { - // Completion for the argument of 'opt' - ret = c.completeValue(opt.value, "", lastarg) - } else if argumentStartsOption(lastarg) { - // Complete the option - prefix, optname, islong := stripOptionPrefix(lastarg) - optname, split, argument := splitOption(prefix, optname, islong) - - if argument == nil && !islong { - rname, n := utf8.DecodeRuneInString(optname) - sname := string(rname) - - if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() { - ret = c.completeValue(opt.value, prefix+sname, optname[n:]) - } else { - ret = c.completeNamesForShortPrefix(s, prefix, optname) - } - } else if argument != nil { - if islong { - opt = s.lookup.longNames[optname] - } else { - opt = s.lookup.shortNames[optname] - } - - if opt != nil { - ret = c.completeValue(opt.value, prefix+optname+split, *argument) - } - } else if islong { - ret = c.completeNamesForLongPrefix(s, prefix, optname) - } else { - ret = c.completeNamesForShortPrefix(s, prefix, optname) - } - } else if len(s.positional) > 0 { - // Complete for positional argument - ret = c.completeValue(s.positional[0].value, "", lastarg) - } else if len(s.command.commands) > 0 { - // Complete for command - ret = c.completeCommands(s, lastarg) - } - - sort.Sort(completions(ret)) - return ret -} - -func (c *completion) print(items []Completion, showDescriptions bool) { - if showDescriptions && len(items) > 1 { - maxl := 0 - - for _, v := range items { - if len(v.Item) > maxl { - maxl = len(v.Item) - } - } - - for _, v := range items { - fmt.Printf("%s", v.Item) - - if len(v.Description) > 0 { - fmt.Printf("%s # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description) - } - - fmt.Printf("\n") - } - } else { - for _, v := range items { - fmt.Println(v.Item) - } - } -} diff --git a/vendor/github.com/jessevdk/go-flags/completion_test.go b/vendor/github.com/jessevdk/go-flags/completion_test.go deleted file mode 100644 index 26f70e4..0000000 --- a/vendor/github.com/jessevdk/go-flags/completion_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package flags - -import ( - "bytes" - "io" - "os" - "path" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" -) - -type TestComplete struct { -} - -func (t *TestComplete) Complete(match string) []Completion { - options := []string{ - "hello world", - "hello universe", - "hello multiverse", - } - - ret := make([]Completion, 0, len(options)) - - for _, o := range options { - if strings.HasPrefix(o, match) { - ret = append(ret, Completion{ - Item: o, - }) - } - } - - return ret -} - -var completionTestOptions struct { - Verbose bool `short:"v" long:"verbose" description:"Verbose messages"` - Debug bool `short:"d" long:"debug" description:"Enable debug"` - Info bool `short:"i" description:"Display info"` - Version bool `long:"version" description:"Show version"` - Required bool `long:"required" required:"true" description:"This is required"` - Hidden bool `long:"hidden" hidden:"true" description:"This is hidden"` - - AddCommand struct { - Positional struct { - Filename Filename - } `positional-args:"yes"` - } `command:"add" description:"add an item"` - - AddMultiCommand struct { - Positional struct { - Filename []Filename - } `positional-args:"yes"` 
- Extra []Filename `short:"f"` - } `command:"add-multi" description:"add multiple items"` - - AddMultiCommandFlag struct { - Files []Filename `short:"f"` - } `command:"add-multi-flag" description:"add multiple items via flags"` - - RemoveCommand struct { - Other bool `short:"o"` - File Filename `short:"f" long:"filename"` - } `command:"rm" description:"remove an item"` - - RenameCommand struct { - Completed TestComplete `short:"c" long:"completed"` - } `command:"rename" description:"rename an item"` -} - -type completionTest struct { - Args []string - Completed []string - ShowDescriptions bool -} - -var completionTests []completionTest - -func init() { - _, sourcefile, _, _ := runtime.Caller(0) - completionTestSourcedir := filepath.Join(filepath.SplitList(path.Dir(sourcefile))...) - - completionTestFilename := []string{filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion_test.go")} - - completionTests = []completionTest{ - { - // Short names - []string{"-"}, - []string{"--debug", "--required", "--verbose", "--version", "-i"}, - false, - }, - - { - // Short names full - []string{"-i"}, - []string{"-i"}, - false, - }, - - { - // Short names concatenated - []string{"-dv"}, - []string{"-dv"}, - false, - }, - - { - // Long names - []string{"--"}, - []string{"--debug", "--required", "--verbose", "--version"}, - false, - }, - - { - // Long names with descriptions - []string{"--"}, - []string{ - "--debug # Enable debug", - "--required # This is required", - "--verbose # Verbose messages", - "--version # Show version", - }, - true, - }, - - { - // Long names partial - []string{"--ver"}, - []string{"--verbose", "--version"}, - false, - }, - - { - // Commands - []string{""}, - []string{"add", "add-multi", "add-multi-flag", "rename", "rm"}, - false, - }, - - { - // Commands with descriptions - []string{""}, - []string{ - "add # add an item", - "add-multi # add multiple items", - "add-multi-flag # add multiple items via flags", - "rename # rename an item", - "rm # remove an item", - }, - true, - }, - - { - // Commands partial - []string{"r"}, - []string{"rename", "rm"}, - false, - }, - - { - // Positional filename - []string{"add", filepath.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - - { - // Multiple positional filename (1 arg) - []string{"add-multi", filepath.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - { - // Multiple positional filename (2 args) - []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - { - // Multiple positional filename (3 args) - []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - - { - // Flag filename - []string{"rm", "-f", path.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - - { - // Flag short concat last filename - []string{"rm", "-of", path.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - - { - // Flag concat filename - []string{"rm", "-f" + path.Join(completionTestSourcedir, "completion")}, - []string{"-f" + completionTestFilename[0], "-f" + completionTestFilename[1]}, - false, - }, - - { - // Flag equal concat filename - []string{"rm", "-f=" + 
path.Join(completionTestSourcedir, "completion")}, - []string{"-f=" + completionTestFilename[0], "-f=" + completionTestFilename[1]}, - false, - }, - - { - // Flag concat long filename - []string{"rm", "--filename=" + path.Join(completionTestSourcedir, "completion")}, - []string{"--filename=" + completionTestFilename[0], "--filename=" + completionTestFilename[1]}, - false, - }, - - { - // Flag long filename - []string{"rm", "--filename", path.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - - { - // Custom completed - []string{"rename", "-c", "hello un"}, - []string{"hello universe"}, - false, - }, - { - // Multiple flag filename - []string{"add-multi-flag", "-f", filepath.Join(completionTestSourcedir, "completion")}, - completionTestFilename, - false, - }, - } -} - -func TestCompletion(t *testing.T) { - p := NewParser(&completionTestOptions, Default) - c := &completion{parser: p} - - for _, test := range completionTests { - if test.ShowDescriptions { - continue - } - - ret := c.complete(test.Args) - items := make([]string, len(ret)) - - for i, v := range ret { - items[i] = v.Item - } - - if !reflect.DeepEqual(items, test.Completed) { - t.Errorf("Args: %#v, %#v\n Expected: %#v\n Got: %#v", test.Args, test.ShowDescriptions, test.Completed, items) - } - } -} - -func TestParserCompletion(t *testing.T) { - for _, test := range completionTests { - if test.ShowDescriptions { - os.Setenv("GO_FLAGS_COMPLETION", "verbose") - } else { - os.Setenv("GO_FLAGS_COMPLETION", "1") - } - - tmp := os.Stdout - - r, w, _ := os.Pipe() - os.Stdout = w - - out := make(chan string) - - go func() { - var buf bytes.Buffer - - io.Copy(&buf, r) - - out <- buf.String() - }() - - p := NewParser(&completionTestOptions, None) - - p.CompletionHandler = func(items []Completion) { - comp := &completion{parser: p} - comp.print(items, test.ShowDescriptions) - } - - _, err := p.ParseArgs(test.Args) - - w.Close() - - os.Stdout = tmp - - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - got := strings.Split(strings.Trim(<-out, "\n"), "\n") - - if !reflect.DeepEqual(got, test.Completed) { - t.Errorf("Expected: %#v\nGot: %#v", test.Completed, got) - } - } - - os.Setenv("GO_FLAGS_COMPLETION", "") -} diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go deleted file mode 100644 index 984aac8..0000000 --- a/vendor/github.com/jessevdk/go-flags/convert.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Marshaler is the interface implemented by types that can marshal themselves -// to a string representation of the flag. -type Marshaler interface { - // MarshalFlag marshals a flag value to its string representation. - MarshalFlag() (string, error) -} - -// Unmarshaler is the interface implemented by types that can unmarshal a flag -// argument to themselves. The provided value is directly passed from the -// command line. -type Unmarshaler interface { - // UnmarshalFlag unmarshals a string value representation to the flag - // value (which therefore needs to be a pointer receiver). 
- UnmarshalFlag(value string) error -} - -func getBase(options multiTag, base int) (int, error) { - sbase := options.Get("base") - - var err error - var ivbase int64 - - if sbase != "" { - ivbase, err = strconv.ParseInt(sbase, 10, 32) - base = int(ivbase) - } - - return base, err -} - -func convertMarshal(val reflect.Value) (bool, string, error) { - // Check first for the Marshaler interface - if val.Type().NumMethod() > 0 && val.CanInterface() { - if marshaler, ok := val.Interface().(Marshaler); ok { - ret, err := marshaler.MarshalFlag() - return true, ret, err - } - } - - return false, "", nil -} - -func convertToString(val reflect.Value, options multiTag) (string, error) { - if ok, ret, err := convertMarshal(val); ok { - return ret, err - } - - tp := val.Type() - - // Support for time.Duration - if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { - stringer := val.Interface().(fmt.Stringer) - return stringer.String(), nil - } - - switch tp.Kind() { - case reflect.String: - return val.String(), nil - case reflect.Bool: - if val.Bool() { - return "true", nil - } - - return "false", nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - base, err := getBase(options, 10) - - if err != nil { - return "", err - } - - return strconv.FormatInt(val.Int(), base), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - base, err := getBase(options, 10) - - if err != nil { - return "", err - } - - return strconv.FormatUint(val.Uint(), base), nil - case reflect.Float32, reflect.Float64: - return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil - case reflect.Slice: - if val.Len() == 0 { - return "", nil - } - - ret := "[" - - for i := 0; i < val.Len(); i++ { - if i != 0 { - ret += ", " - } - - item, err := convertToString(val.Index(i), options) - - if err != nil { - return "", err - } - - ret += item - } - - return ret + "]", nil - case reflect.Map: - ret := "{" - - for i, key := range val.MapKeys() { - if i != 0 { - ret += ", " - } - - keyitem, err := convertToString(key, options) - - if err != nil { - return "", err - } - - item, err := convertToString(val.MapIndex(key), options) - - if err != nil { - return "", err - } - - ret += keyitem + ":" + item - } - - return ret + "}", nil - case reflect.Ptr: - return convertToString(reflect.Indirect(val), options) - case reflect.Interface: - if !val.IsNil() { - return convertToString(val.Elem(), options) - } - } - - return "", nil -} - -func convertUnmarshal(val string, retval reflect.Value) (bool, error) { - if retval.Type().NumMethod() > 0 && retval.CanInterface() { - if unmarshaler, ok := retval.Interface().(Unmarshaler); ok { - if retval.IsNil() { - retval.Set(reflect.New(retval.Type().Elem())) - - // Re-assign from the new value - unmarshaler = retval.Interface().(Unmarshaler) - } - - return true, unmarshaler.UnmarshalFlag(val) - } - } - - if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() { - return convertUnmarshal(val, retval.Addr()) - } - - if retval.Type().Kind() == reflect.Interface && !retval.IsNil() { - return convertUnmarshal(val, retval.Elem()) - } - - return false, nil -} - -func convert(val string, retval reflect.Value, options multiTag) error { - if ok, err := convertUnmarshal(val, retval); ok { - return err - } - - tp := retval.Type() - - // Support for time.Duration - if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { - parsed, err := time.ParseDuration(val) - - if err != nil { - return err - } - - retval.SetInt(int64(parsed)) - return nil - } - - 
switch tp.Kind() { - case reflect.String: - retval.SetString(val) - case reflect.Bool: - if val == "" { - retval.SetBool(true) - } else { - b, err := strconv.ParseBool(val) - - if err != nil { - return err - } - - retval.SetBool(b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - base, err := getBase(options, 10) - - if err != nil { - return err - } - - parsed, err := strconv.ParseInt(val, base, tp.Bits()) - - if err != nil { - return err - } - - retval.SetInt(parsed) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - base, err := getBase(options, 10) - - if err != nil { - return err - } - - parsed, err := strconv.ParseUint(val, base, tp.Bits()) - - if err != nil { - return err - } - - retval.SetUint(parsed) - case reflect.Float32, reflect.Float64: - parsed, err := strconv.ParseFloat(val, tp.Bits()) - - if err != nil { - return err - } - - retval.SetFloat(parsed) - case reflect.Slice: - elemtp := tp.Elem() - - elemvalptr := reflect.New(elemtp) - elemval := reflect.Indirect(elemvalptr) - - if err := convert(val, elemval, options); err != nil { - return err - } - - retval.Set(reflect.Append(retval, elemval)) - case reflect.Map: - parts := strings.SplitN(val, ":", 2) - - key := parts[0] - var value string - - if len(parts) == 2 { - value = parts[1] - } - - keytp := tp.Key() - keyval := reflect.New(keytp) - - if err := convert(key, keyval, options); err != nil { - return err - } - - valuetp := tp.Elem() - valueval := reflect.New(valuetp) - - if err := convert(value, valueval, options); err != nil { - return err - } - - if retval.IsNil() { - retval.Set(reflect.MakeMap(tp)) - } - - retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval)) - case reflect.Ptr: - if retval.IsNil() { - retval.Set(reflect.New(retval.Type().Elem())) - } - - return convert(val, reflect.Indirect(retval), options) - case reflect.Interface: - if !retval.IsNil() { - return convert(val, retval.Elem(), options) - } - } - - return nil -} - -func isPrint(s string) bool { - for _, c := range s { - if !strconv.IsPrint(c) { - return false - } - } - - return true -} - -func quoteIfNeeded(s string) string { - if !isPrint(s) { - return strconv.Quote(s) - } - - return s -} - -func quoteIfNeededV(s []string) []string { - ret := make([]string, len(s)) - - for i, v := range s { - ret[i] = quoteIfNeeded(v) - } - - return ret -} - -func quoteV(s []string) []string { - ret := make([]string, len(s)) - - for i, v := range s { - ret[i] = strconv.Quote(v) - } - - return ret -} - -func unquoteIfPossible(s string) (string, error) { - if len(s) == 0 || s[0] != '"' { - return s, nil - } - - return strconv.Unquote(s) -} diff --git a/vendor/github.com/jessevdk/go-flags/convert_test.go b/vendor/github.com/jessevdk/go-flags/convert_test.go deleted file mode 100644 index ef131dc..0000000 --- a/vendor/github.com/jessevdk/go-flags/convert_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package flags - -import ( - "testing" - "time" -) - -func expectConvert(t *testing.T, o *Option, expected string) { - s, err := convertToString(o.value, o.tag) - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - assertString(t, s, expected) -} - -func TestConvertToString(t *testing.T) { - d, _ := time.ParseDuration("1h2m4s") - - var opts = struct { - String string `long:"string"` - - Int int `long:"int"` - Int8 int8 `long:"int8"` - Int16 int16 `long:"int16"` - Int32 int32 `long:"int32"` - Int64 int64 `long:"int64"` - - Uint uint `long:"uint"` - Uint8 uint8 `long:"uint8"` 
- Uint16 uint16 `long:"uint16"` - Uint32 uint32 `long:"uint32"` - Uint64 uint64 `long:"uint64"` - - Float32 float32 `long:"float32"` - Float64 float64 `long:"float64"` - - Duration time.Duration `long:"duration"` - - Bool bool `long:"bool"` - - IntSlice []int `long:"int-slice"` - IntFloatMap map[int]float64 `long:"int-float-map"` - - PtrBool *bool `long:"ptr-bool"` - Interface interface{} `long:"interface"` - - Int32Base int32 `long:"int32-base" base:"16"` - Uint32Base uint32 `long:"uint32-base" base:"16"` - }{ - "string", - - -2, - -1, - 0, - 1, - 2, - - 1, - 2, - 3, - 4, - 5, - - 1.2, - -3.4, - - d, - true, - - []int{-3, 4, -2}, - map[int]float64{-2: 4.5}, - - new(bool), - float32(5.2), - - -5823, - 4232, - } - - p := NewNamedParser("test", Default) - grp, _ := p.AddGroup("test group", "", &opts) - - expects := []string{ - "string", - "-2", - "-1", - "0", - "1", - "2", - - "1", - "2", - "3", - "4", - "5", - - "1.2", - "-3.4", - - "1h2m4s", - "true", - - "[-3, 4, -2]", - "{-2:4.5}", - - "false", - "5.2", - - "-16bf", - "1088", - } - - for i, v := range grp.Options() { - expectConvert(t, v, expects[i]) - } -} - -func TestConvertToStringInvalidIntBase(t *testing.T) { - var opts = struct { - Int int `long:"int" base:"no"` - }{ - 2, - } - - p := NewNamedParser("test", Default) - grp, _ := p.AddGroup("test group", "", &opts) - o := grp.Options()[0] - - _, err := convertToString(o.value, o.tag) - - if err != nil { - err = newErrorf(ErrMarshal, "%v", err) - } - - assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax") -} - -func TestConvertToStringInvalidUintBase(t *testing.T) { - var opts = struct { - Uint uint `long:"uint" base:"no"` - }{ - 2, - } - - p := NewNamedParser("test", Default) - grp, _ := p.AddGroup("test group", "", &opts) - o := grp.Options()[0] - - _, err := convertToString(o.value, o.tag) - - if err != nil { - err = newErrorf(ErrMarshal, "%v", err) - } - - assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax") -} diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go deleted file mode 100644 index 05528d8..0000000 --- a/vendor/github.com/jessevdk/go-flags/error.go +++ /dev/null @@ -1,134 +0,0 @@ -package flags - -import ( - "fmt" -) - -// ErrorType represents the type of error. -type ErrorType uint - -const ( - // ErrUnknown indicates a generic error. - ErrUnknown ErrorType = iota - - // ErrExpectedArgument indicates that an argument was expected. - ErrExpectedArgument - - // ErrUnknownFlag indicates an unknown flag. - ErrUnknownFlag - - // ErrUnknownGroup indicates an unknown group. - ErrUnknownGroup - - // ErrMarshal indicates a marshalling error while converting values. - ErrMarshal - - // ErrHelp indicates that the built-in help was shown (the error - // contains the help message). - ErrHelp - - // ErrNoArgumentForBool indicates that an argument was given for a - // boolean flag (which don't not take any arguments). - ErrNoArgumentForBool - - // ErrRequired indicates that a required flag was not provided. - ErrRequired - - // ErrShortNameTooLong indicates that a short flag name was specified, - // longer than one character. - ErrShortNameTooLong - - // ErrDuplicatedFlag indicates that a short or long flag has been - // defined more than once - ErrDuplicatedFlag - - // ErrTag indicates an error while parsing flag tags. 
- ErrTag - - // ErrCommandRequired indicates that a command was required but not - // specified - ErrCommandRequired - - // ErrUnknownCommand indicates that an unknown command was specified. - ErrUnknownCommand - - // ErrInvalidChoice indicates an invalid option value which only allows - // a certain number of choices. - ErrInvalidChoice - - // ErrInvalidTag indicates an invalid tag or invalid use of an existing tag - ErrInvalidTag -) - -func (e ErrorType) String() string { - switch e { - case ErrUnknown: - return "unknown" - case ErrExpectedArgument: - return "expected argument" - case ErrUnknownFlag: - return "unknown flag" - case ErrUnknownGroup: - return "unknown group" - case ErrMarshal: - return "marshal" - case ErrHelp: - return "help" - case ErrNoArgumentForBool: - return "no argument for bool" - case ErrRequired: - return "required" - case ErrShortNameTooLong: - return "short name too long" - case ErrDuplicatedFlag: - return "duplicated flag" - case ErrTag: - return "tag" - case ErrCommandRequired: - return "command required" - case ErrUnknownCommand: - return "unknown command" - case ErrInvalidChoice: - return "invalid choice" - case ErrInvalidTag: - return "invalid tag" - } - - return "unrecognized error type" -} - -// Error represents a parser error. The error returned from Parse is of this -// type. The error contains both a Type and Message. -type Error struct { - // The type of error - Type ErrorType - - // The error message - Message string -} - -// Error returns the error's message -func (e *Error) Error() string { - return e.Message -} - -func newError(tp ErrorType, message string) *Error { - return &Error{ - Type: tp, - Message: message, - } -} - -func newErrorf(tp ErrorType, format string, args ...interface{}) *Error { - return newError(tp, fmt.Sprintf(format, args...)) -} - -func wrapError(err error) *Error { - ret, ok := err.(*Error) - - if !ok { - return newError(ErrUnknown, err.Error()) - } - - return ret -} diff --git a/vendor/github.com/jessevdk/go-flags/example_test.go b/vendor/github.com/jessevdk/go-flags/example_test.go deleted file mode 100644 index 4321ed8..0000000 --- a/vendor/github.com/jessevdk/go-flags/example_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Example of use of the flags package. -package flags - -import ( - "fmt" - "os/exec" -) - -func Example() { - var opts struct { - // Slice of bool will append 'true' each time the option - // is encountered (can be set multiple times, like -vvv) - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - - // Example of automatic marshalling to desired type (uint) - Offset uint `long:"offset" description:"Offset"` - - // Example of a callback, called each time the option is found. 
- Call func(string) `short:"c" description:"Call phone number"` - - // Example of a required flag - Name string `short:"n" long:"name" description:"A name" required:"true"` - - // Example of a value name - File string `short:"f" long:"file" description:"A file" value-name:"FILE"` - - // Example of a pointer - Ptr *int `short:"p" description:"A pointer to an integer"` - - // Example of a slice of strings - StringSlice []string `short:"s" description:"A slice of strings"` - - // Example of a slice of pointers - PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` - - // Example of a map - IntMap map[string]int `long:"intmap" description:"A map from string to int"` - - // Example of a filename (useful for completion) - Filename Filename `long:"filename" description:"A filename"` - - // Example of positional arguments - Args struct { - ID string - Num int - Rest []string - } `positional-args:"yes" required:"yes"` - } - - // Callback which will invoke callto: to call a number. - // Note that this works just on OS X (and probably only with - // Skype) but it shows the idea. - opts.Call = func(num string) { - cmd := exec.Command("open", "callto:"+num) - cmd.Start() - cmd.Process.Release() - } - - // Make some fake arguments to parse. - args := []string{ - "-vv", - "--offset=5", - "-n", "Me", - "-p", "3", - "-s", "hello", - "-s", "world", - "--ptrslice", "hello", - "--ptrslice", "world", - "--intmap", "a:1", - "--intmap", "b:5", - "--filename", "hello.go", - "id", - "10", - "remaining1", - "remaining2", - } - - // Parse flags from `args'. Note that here we use flags.ParseArgs for - // the sake of making a working example. Normally, you would simply use - // flags.Parse(&opts) which uses os.Args - _, err := ParseArgs(&opts, args) - - if err != nil { - panic(err) - } - - fmt.Printf("Verbosity: %v\n", opts.Verbose) - fmt.Printf("Offset: %d\n", opts.Offset) - fmt.Printf("Name: %s\n", opts.Name) - fmt.Printf("Ptr: %d\n", *opts.Ptr) - fmt.Printf("StringSlice: %v\n", opts.StringSlice) - fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) - fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) - fmt.Printf("Filename: %v\n", opts.Filename) - fmt.Printf("Args.ID: %s\n", opts.Args.ID) - fmt.Printf("Args.Num: %d\n", opts.Args.Num) - fmt.Printf("Args.Rest: %v\n", opts.Args.Rest) - - // Output: Verbosity: [true true] - // Offset: 5 - // Name: Me - // Ptr: 3 - // StringSlice: [hello world] - // PtrSlice: [hello world] - // IntMap: [a:1 b:5] - // Filename: hello.go - // Args.ID: id - // Args.Num: 10 - // Args.Rest: [remaining1 remaining2] -} diff --git a/vendor/github.com/jessevdk/go-flags/examples/add.go b/vendor/github.com/jessevdk/go-flags/examples/add.go deleted file mode 100644 index 57d8f23..0000000 --- a/vendor/github.com/jessevdk/go-flags/examples/add.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" -) - -type AddCommand struct { - All bool `short:"a" long:"all" description:"Add all files"` -} - -var addCommand AddCommand - -func (x *AddCommand) Execute(args []string) error { - fmt.Printf("Adding (all=%v): %#v\n", x.All, args) - return nil -} - -func init() { - parser.AddCommand("add", - "Add a file", - "The add command adds a file to the repository. 
Use -a to add all files.", - &addCommand) -} diff --git a/vendor/github.com/jessevdk/go-flags/examples/bash-completion b/vendor/github.com/jessevdk/go-flags/examples/bash-completion deleted file mode 100644 index 974f52a..0000000 --- a/vendor/github.com/jessevdk/go-flags/examples/bash-completion +++ /dev/null @@ -1,9 +0,0 @@ -_examples() { - args=("${COMP_WORDS[@]:1:$COMP_CWORD}") - - local IFS=$'\n' - COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) - return 1 -} - -complete -F _examples examples diff --git a/vendor/github.com/jessevdk/go-flags/examples/main.go b/vendor/github.com/jessevdk/go-flags/examples/main.go deleted file mode 100644 index 632c331..0000000 --- a/vendor/github.com/jessevdk/go-flags/examples/main.go +++ /dev/null @@ -1,79 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "github.com/jessevdk/go-flags" - "os" - "strconv" - "strings" -) - -type EditorOptions struct { - Input flags.Filename `short:"i" long:"input" description:"Input file" default:"-"` - Output flags.Filename `short:"o" long:"output" description:"Output file" default:"-"` -} - -type Point struct { - X, Y int -} - -func (p *Point) UnmarshalFlag(value string) error { - parts := strings.Split(value, ",") - - if len(parts) != 2 { - return errors.New("expected two numbers separated by a ,") - } - - x, err := strconv.ParseInt(parts[0], 10, 32) - - if err != nil { - return err - } - - y, err := strconv.ParseInt(parts[1], 10, 32) - - if err != nil { - return err - } - - p.X = int(x) - p.Y = int(y) - - return nil -} - -func (p Point) MarshalFlag() (string, error) { - return fmt.Sprintf("%d,%d", p.X, p.Y), nil -} - -type Options struct { - // Example of verbosity with level - Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` - - // Example of optional value - User string `short:"u" long:"user" description:"User name" optional:"yes" optional-value:"pancake"` - - // Example of map with multiple default values - Users map[string]string `long:"users" description:"User e-mail map" default:"system:system@example.org" default:"admin:admin@example.org"` - - // Example of option group - Editor EditorOptions `group:"Editor Options"` - - // Example of custom type Marshal/Unmarshal - Point Point `long:"point" description:"A x,y point" default:"1,2"` -} - -var options Options - -var parser = flags.NewParser(&options, flags.Default) - -func main() { - if _, err := parser.Parse(); err != nil { - if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp { - os.Exit(0) - } else { - os.Exit(1) - } - } -} diff --git a/vendor/github.com/jessevdk/go-flags/examples/rm.go b/vendor/github.com/jessevdk/go-flags/examples/rm.go deleted file mode 100644 index c9c1dd0..0000000 --- a/vendor/github.com/jessevdk/go-flags/examples/rm.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" -) - -type RmCommand struct { - Force bool `short:"f" long:"force" description:"Force removal of files"` -} - -var rmCommand RmCommand - -func (x *RmCommand) Execute(args []string) error { - fmt.Printf("Removing (force=%v): %#v\n", x.Force, args) - return nil -} - -func init() { - parser.AddCommand("rm", - "Remove a file", - "The rm command removes a file to the repository. 
Use -f to force removal of files.", - &rmCommand) -} diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go deleted file mode 100644 index 889762d..0000000 --- a/vendor/github.com/jessevdk/go-flags/flags.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package flags provides an extensive command line option parser. -The flags package is similar in functionality to the go built-in flag package -but provides more options and uses reflection to provide a convenient and -succinct way of specifying command line options. - - -Supported features - -The following features are supported in go-flags: - - Options with short names (-v) - Options with long names (--verbose) - Options with and without arguments (bool v.s. other type) - Options with optional arguments and default values - Option default values from ENVIRONMENT_VARIABLES, including slice and map values - Multiple option groups each containing a set of options - Generate and print well-formatted help message - Passing remaining command line arguments after -- (optional) - Ignoring unknown command line options (optional) - Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification - Supports multiple short options -aux - Supports all primitive go types (string, int{8..64}, uint{8..64}, float) - Supports same option multiple times (can store in slice or last option counts) - Supports maps - Supports function callbacks - Supports namespaces for (nested) option groups - -Additional features specific to Windows: - Options with short names (/v) - Options with long names (/verbose) - Windows-style options with arguments use a colon as the delimiter - Modify generated help message with Windows-style / options - Windows style options can be disabled at build time using the "forceposix" - build tag - - -Basic usage - -The flags package uses structs, reflection and struct field tags -to allow users to specify command line options. This results in very simple -and concise specification of your application options. For example: - - type Options struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - } - -This specifies one option with a short name -v and a long name --verbose. -When either -v or --verbose is found on the command line, a 'true' value -will be appended to the Verbose field. e.g. when specifying -vvv, the -resulting value of Verbose will be {[true, true, true]}. - -Slice options work exactly the same as primitive type options, except that -whenever the option is encountered, a value is appended to the slice. - -Map options from string to primitive type are also supported. On the command -line, you specify the value for such an option as key:value. For example - - type Options struct { - AuthorInfo string[string] `short:"a"` - } - -Then, the AuthorInfo map can be filled with something like --a name:Jesse -a "surname:van den Kieboom". - -Finally, for full control over the conversion between command line argument -values and options, user defined types can choose to implement the Marshaler -and Unmarshaler interfaces. 
- - -Available field tags - -The following is a list of tags for struct fields supported by go-flags: - - short: the short name of the option (single character) - long: the long name of the option - required: if non empty, makes the option required to appear on the command - line. If a required option is not present, the parser will - return ErrRequired (optional) - description: the description of the option (optional) - long-description: the long description of the option. Currently only - displayed in generated man pages (optional) - no-flag: if non-empty, this field is ignored as an option (optional) - - optional: if non-empty, makes the argument of the option optional. When an - argument is optional it can only be specified using - --option=argument (optional) - optional-value: the value of an optional option when the option occurs - without an argument. This tag can be specified multiple - times in the case of maps or slices (optional) - default: the default value of an option. This tag can be specified - multiple times in the case of slices or maps (optional) - default-mask: when specified, this value will be displayed in the help - instead of the actual default value. This is useful - mostly for hiding otherwise sensitive information from - showing up in the help. If default-mask takes the special - value "-", then no default value will be shown at all - (optional) - env: the default value of the option is overridden from the - specified environment variable, if one has been defined. - (optional) - env-delim: the 'env' default value from environment is split into - multiple values with the given delimiter string, use with - slices and maps (optional) - value-name: the name of the argument value (to be shown in the help) - (optional) - choice: limits the values for an option to a set of values. - This tag can be specified multiple times (optional) - hidden: if non-empty, the option is not visible in the help or man page. - - base: a base (radix) used to convert strings to integer values, the - default base is 10 (i.e. decimal) (optional) - - ini-name: the explicit ini option name (optional) - no-ini: if non-empty this field is ignored as an ini option - (optional) - - group: when specified on a struct field, makes the struct - field a separate group with the given name (optional) - namespace: when specified on a group struct field, the namespace - gets prepended to every option's long name and - subgroup's namespace of this group, separated by - the parser's namespace delimiter (optional) - command: when specified on a struct field, makes the struct - field a (sub)command with the given name (optional) - subcommands-optional: when specified on a command struct field, makes - any subcommands of that command optional (optional) - alias: when specified on a command struct field, adds the - specified name as an alias for the command. Can be - be specified multiple times to add more than one - alias (optional) - positional-args: when specified on a field with a struct type, - uses the fields of that struct to parse remaining - positional command line arguments into (in order - of the fields). If a field has a slice type, - then all remaining arguments will be added to it. - Positional arguments are optional by default, - unless the "required" tag is specified together - with the "positional-args" tag. The "required" tag - can also be set on the individual rest argument - fields, to require only the first N positional - arguments. 
If the "required" tag is set on the - rest arguments slice, then its value determines - the minimum amount of rest arguments that needs to - be provided (e.g. `required:"2"`) (optional) - positional-arg-name: used on a field in a positional argument struct; name - of the positional argument placeholder to be shown in - the help (optional) - -Either the `short:` tag or the `long:` must be specified to make the field eligible as an -option. - - -Option groups - -Option groups are a simple way to semantically separate your options. All -options in a particular group are shown together in the help under the name -of the group. Namespaces can be used to specify option long names more -precisely and emphasize the options affiliation to their group. - -There are currently three ways to specify option groups. - - 1. Use NewNamedParser specifying the various option groups. - 2. Use AddGroup to add a group to an existing parser. - 3. Add a struct field to the top-level options annotated with the - group:"group-name" tag. - - - -Commands - -The flags package also has basic support for commands. Commands are often -used in monolithic applications that support various commands or actions. -Take git for example, all of the add, commit, checkout, etc. are called -commands. Using commands you can easily separate multiple functions of your -application. - -There are currently two ways to specify a command. - - 1. Use AddCommand on an existing parser. - 2. Add a struct field to your options struct annotated with the - command:"command-name" tag. - -The most common, idiomatic way to implement commands is to define a global -parser instance and implement each command in a separate file. These -command files should define a go init function which calls AddCommand on -the global parser. - -When parsing ends and there is an active command and that command implements -the Commander interface, then its Execute method will be run with the -remaining command line arguments. - -Command structs can have options which become valid to parse after the -command has been specified on the command line, in addition to the options -of all the parent commands. I.e. considering a -v flag on the parser and an -add command, the following are equivalent: - - ./app -v add - ./app add -v - -However, if the -v flag is defined on the add command, then the first of -the two examples above would fail since the -v flag is not defined before -the add command. - - -Completion - -go-flags has builtin support to provide bash completion of flags, commands -and argument values. To use completion, the binary which uses go-flags -can be invoked in a special environment to list completion of the current -command line argument. It should be noted that this `executes` your application, -and it is up to the user to make sure there are no negative side effects (for -example from init functions). - -Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion -by replacing the argument parsing routine with the completion routine which -outputs completions for the passed arguments. The basic invocation to -complete a set of arguments is therefore: - - GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3 - -where `completion-example` is the binary, `arg1` and `arg2` are -the current arguments, and `arg3` (the last argument) is the argument -to be completed. If the GO_FLAGS_COMPLETION is set to "verbose", then -descriptions of possible completion items will also be shown, if there -are more than 1 completion items. 
- -To use this with bash completion, a simple file can be written which -calls the binary which supports go-flags completion: - - _completion_example() { - # All arguments except the first one - args=("${COMP_WORDS[@]:1:$COMP_CWORD}") - - # Only split on newlines - local IFS=$'\n' - - # Call completion (note that the first element of COMP_WORDS is - # the executable itself) - COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) - return 0 - } - - complete -F _completion_example completion-example - -Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set. - -Customized completion for argument values is supported by implementing -the flags.Completer interface for the argument value type. An example -of a type which does so is the flags.Filename type, an alias of string -allowing simple filename completion. A slice or array argument value -whose element type implements flags.Completer will also be completed. -*/ -package flags diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go deleted file mode 100644 index 6133a71..0000000 --- a/vendor/github.com/jessevdk/go-flags/group.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -import ( - "errors" - "reflect" - "strings" - "unicode/utf8" - "unsafe" -) - -// ErrNotPointerToStruct indicates that a provided data container is not -// a pointer to a struct. Only pointers to structs are valid data containers -// for options. -var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct") - -// Group represents an option group. Option groups can be used to logically -// group options together under a description. Groups are only used to provide -// more structure to options both for the user (as displayed in the help message) -// and for you, since groups can be nested. -type Group struct { - // A short description of the group. The - // short description is primarily used in the built-in generated help - // message - ShortDescription string - - // A long description of the group. The long - // description is primarily used to present information on commands - // (Command embeds Group) in the built-in generated help and man pages. - LongDescription string - - // The namespace of the group - Namespace string - - // If true, the group is not displayed in the help or man page - Hidden bool - - // The parent of the group or nil if it has no parent - parent interface{} - - // All the options in the group - options []*Option - - // All the subgroups - groups []*Group - - // Whether the group represents the built-in help group - isBuiltinHelp bool - - data interface{} -} - -type scanHandler func(reflect.Value, *reflect.StructField) (bool, error) - -// AddGroup adds a new group to the command with the given name and data. The -// data needs to be a pointer to a struct from which the fields indicate which -// options are in the group. -func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { - group := newGroup(shortDescription, longDescription, data) - - group.parent = g - - if err := group.scan(); err != nil { - return nil, err - } - - g.groups = append(g.groups, group) - return group, nil -} - -// Groups returns the list of groups embedded in this group. 
-func (g *Group) Groups() []*Group { - return g.groups -} - -// Options returns the list of options in this group. -func (g *Group) Options() []*Option { - return g.options -} - -// Find locates the subgroup with the given short description and returns it. -// If no such group can be found Find will return nil. Note that the description -// is matched case insensitively. -func (g *Group) Find(shortDescription string) *Group { - lshortDescription := strings.ToLower(shortDescription) - - var ret *Group - - g.eachGroup(func(gg *Group) { - if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription { - ret = gg - } - }) - - return ret -} - -func (g *Group) findOption(matcher func(*Option) bool) (option *Option) { - g.eachGroup(func(g *Group) { - for _, opt := range g.options { - if option == nil && matcher(opt) { - option = opt - } - } - }) - - return option -} - -// FindOptionByLongName finds an option that is part of the group, or any of its -// subgroups, by matching its long name (including the option namespace). -func (g *Group) FindOptionByLongName(longName string) *Option { - return g.findOption(func(option *Option) bool { - return option.LongNameWithNamespace() == longName - }) -} - -// FindOptionByShortName finds an option that is part of the group, or any of -// its subgroups, by matching its short name. -func (g *Group) FindOptionByShortName(shortName rune) *Option { - return g.findOption(func(option *Option) bool { - return option.ShortName == shortName - }) -} - -func newGroup(shortDescription string, longDescription string, data interface{}) *Group { - return &Group{ - ShortDescription: shortDescription, - LongDescription: longDescription, - - data: data, - } -} - -func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option { - prio := 0 - var retopt *Option - - g.eachGroup(func(g *Group) { - for _, opt := range g.options { - if namematch != nil && namematch(opt, name) && prio < 4 { - retopt = opt - prio = 4 - } - - if name == opt.field.Name && prio < 3 { - retopt = opt - prio = 3 - } - - if name == opt.LongNameWithNamespace() && prio < 2 { - retopt = opt - prio = 2 - } - - if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 { - retopt = opt - prio = 1 - } - } - }) - - return retopt -} - -func (g *Group) eachGroup(f func(*Group)) { - f(g) - - for _, gg := range g.groups { - gg.eachGroup(f) - } -} - -func isStringFalsy(s string) bool { - return s == "" || s == "false" || s == "no" || s == "0" -} - -func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error { - stype := realval.Type() - - if sfield != nil { - if ok, err := handler(realval, sfield); err != nil { - return err - } else if ok { - return nil - } - } - - for i := 0; i < stype.NumField(); i++ { - field := stype.Field(i) - - // PkgName is set only for non-exported fields, which we ignore - if field.PkgPath != "" && !field.Anonymous { - continue - } - - mtag := newMultiTag(string(field.Tag)) - - if err := mtag.Parse(); err != nil { - return err - } - - // Skip fields with the no-flag tag - if mtag.Get("no-flag") != "" { - continue - } - - // Dive deep into structs or pointers to structs - kind := field.Type.Kind() - fld := realval.Field(i) - - if kind == reflect.Struct { - if err := g.scanStruct(fld, &field, handler); err != nil { - return err - } - } else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - flagCountBefore := len(g.options) + len(g.groups) - - if fld.IsNil() { - fld = 
reflect.New(fld.Type().Elem()) - } - - if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil { - return err - } - - if len(g.options)+len(g.groups) != flagCountBefore { - realval.Field(i).Set(fld) - } - } - - longname := mtag.Get("long") - shortname := mtag.Get("short") - - // Need at least either a short or long name - if longname == "" && shortname == "" && mtag.Get("ini-name") == "" { - continue - } - - short := rune(0) - rc := utf8.RuneCountInString(shortname) - - if rc > 1 { - return newErrorf(ErrShortNameTooLong, - "short names can only be 1 character long, not `%s'", - shortname) - - } else if rc == 1 { - short, _ = utf8.DecodeRuneInString(shortname) - } - - description := mtag.Get("description") - def := mtag.GetMany("default") - - optionalValue := mtag.GetMany("optional-value") - valueName := mtag.Get("value-name") - defaultMask := mtag.Get("default-mask") - - optional := !isStringFalsy(mtag.Get("optional")) - required := !isStringFalsy(mtag.Get("required")) - choices := mtag.GetMany("choice") - hidden := !isStringFalsy(mtag.Get("hidden")) - - option := &Option{ - Description: description, - ShortName: short, - LongName: longname, - Default: def, - EnvDefaultKey: mtag.Get("env"), - EnvDefaultDelim: mtag.Get("env-delim"), - OptionalArgument: optional, - OptionalValue: optionalValue, - Required: required, - ValueName: valueName, - DefaultMask: defaultMask, - Choices: choices, - Hidden: hidden, - - group: g, - - field: field, - value: realval.Field(i), - tag: mtag, - } - - if option.isBool() && option.Default != nil { - return newErrorf(ErrInvalidTag, - "boolean flag `%s' may not have default values, they always default to `false' and can only be turned on", - option.shortAndLongName()) - } - - g.options = append(g.options, option) - } - - return nil -} - -func (g *Group) checkForDuplicateFlags() *Error { - shortNames := make(map[rune]*Option) - longNames := make(map[string]*Option) - - var duplicateError *Error - - g.eachGroup(func(g *Group) { - for _, option := range g.options { - if option.LongName != "" { - longName := option.LongNameWithNamespace() - - if otherOption, ok := longNames[longName]; ok { - duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption) - return - } - longNames[longName] = option - } - if option.ShortName != 0 { - if otherOption, ok := shortNames[option.ShortName]; ok { - duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption) - return - } - shortNames[option.ShortName] = option - } - } - }) - - return duplicateError -} - -func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) { - mtag := newMultiTag(string(sfield.Tag)) - - if err := mtag.Parse(); err != nil { - return true, err - } - - subgroup := mtag.Get("group") - - if len(subgroup) != 0 { - ptrval := reflect.NewAt(realval.Type(), unsafe.Pointer(realval.UnsafeAddr())) - description := mtag.Get("description") - - group, err := g.AddGroup(subgroup, description, ptrval.Interface()) - if err != nil { - return true, err - } - - group.Namespace = mtag.Get("namespace") - group.Hidden = mtag.Get("hidden") != "" - - return true, nil - } - - return false, nil -} - -func (g *Group) scanType(handler scanHandler) error { - // Get all the public fields in the data struct - ptrval := reflect.ValueOf(g.data) - - if ptrval.Type().Kind() != reflect.Ptr { - panic(ErrNotPointerToStruct) - } - - stype := ptrval.Type().Elem() - - 
if stype.Kind() != reflect.Struct { - panic(ErrNotPointerToStruct) - } - - realval := reflect.Indirect(ptrval) - - if err := g.scanStruct(realval, nil, handler); err != nil { - return err - } - - if err := g.checkForDuplicateFlags(); err != nil { - return err - } - - return nil -} - -func (g *Group) scan() error { - return g.scanType(g.scanSubGroupHandler) -} - -func (g *Group) groupByName(name string) *Group { - if len(name) == 0 { - return g - } - - return g.Find(name) -} diff --git a/vendor/github.com/jessevdk/go-flags/group_test.go b/vendor/github.com/jessevdk/go-flags/group_test.go deleted file mode 100644 index 18cd6c1..0000000 --- a/vendor/github.com/jessevdk/go-flags/group_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestGroupInline(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Group struct { - G bool `short:"g"` - } `group:"Grouped Options"` - }{} - - p, ret := assertParserSuccess(t, &opts, "-v", "-g") - - assertStringArray(t, ret, []string{}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Group.G { - t.Errorf("Expected Group.G to be true") - } - - if p.Command.Group.Find("Grouped Options") == nil { - t.Errorf("Expected to find group `Grouped Options'") - } -} - -func TestGroupAdd(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - }{} - - var grp = struct { - G bool `short:"g"` - }{} - - p := NewParser(&opts, Default) - g, err := p.AddGroup("Grouped Options", "", &grp) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - ret, err := p.ParseArgs([]string{"-v", "-g", "rest"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - assertStringArray(t, ret, []string{"rest"}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !grp.G { - t.Errorf("Expected Group.G to be true") - } - - if p.Command.Group.Find("Grouped Options") != g { - t.Errorf("Expected to find group `Grouped Options'") - } - - if p.Groups()[1] != g { - t.Errorf("Expected group %#v, but got %#v", g, p.Groups()[0]) - } - - if g.Options()[0].ShortName != 'g' { - t.Errorf("Expected short name `g' but got %v", g.Options()[0].ShortName) - } -} - -func TestGroupNestedInline(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Group struct { - G bool `short:"g"` - - Nested struct { - N string `long:"n"` - } `group:"Nested Options"` - } `group:"Grouped Options"` - }{} - - p, ret := assertParserSuccess(t, &opts, "-v", "-g", "--n", "n", "rest") - - assertStringArray(t, ret, []string{"rest"}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - if !opts.Group.G { - t.Errorf("Expected Group.G to be true") - } - - assertString(t, opts.Group.Nested.N, "n") - - if p.Command.Group.Find("Grouped Options") == nil { - t.Errorf("Expected to find group `Grouped Options'") - } - - if p.Command.Group.Find("Nested Options") == nil { - t.Errorf("Expected to find group `Nested Options'") - } -} - -func TestGroupNestedInlineNamespace(t *testing.T) { - var opts = struct { - Opt string `long:"opt"` - - Group struct { - Opt string `long:"opt"` - Group struct { - Opt string `long:"opt"` - } `group:"Subsubgroup" namespace:"sap"` - } `group:"Subgroup" namespace:"sip"` - }{} - - p, ret := assertParserSuccess(t, &opts, "--opt", "a", "--sip.opt", "b", "--sip.sap.opt", "c", "rest") - - assertStringArray(t, ret, []string{"rest"}) - - assertString(t, opts.Opt, "a") - assertString(t, opts.Group.Opt, "b") - assertString(t, opts.Group.Group.Opt, "c") - - 
for _, name := range []string{"Subgroup", "Subsubgroup"} { - if p.Command.Group.Find(name) == nil { - t.Errorf("Expected to find group '%s'", name) - } - } -} - -func TestDuplicateShortFlags(t *testing.T) { - var opts struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - Variables []string `short:"v" long:"variable" description:"Set a variable value."` - } - - args := []string{ - "--verbose", - "-v", "123", - "-v", "456", - } - - _, err := ParseArgs(&opts, args) - - if err == nil { - t.Errorf("Expected an error with type ErrDuplicatedFlag") - } else { - err2 := err.(*Error) - if err2.Type != ErrDuplicatedFlag { - t.Errorf("Expected an error with type ErrDuplicatedFlag") - } - } -} - -func TestDuplicateLongFlags(t *testing.T) { - var opts struct { - Test1 []bool `short:"a" long:"testing" description:"Test 1"` - Test2 []string `short:"b" long:"testing" description:"Test 2."` - } - - args := []string{ - "--testing", - } - - _, err := ParseArgs(&opts, args) - - if err == nil { - t.Errorf("Expected an error with type ErrDuplicatedFlag") - } else { - err2 := err.(*Error) - if err2.Type != ErrDuplicatedFlag { - t.Errorf("Expected an error with type ErrDuplicatedFlag") - } - } -} - -func TestFindOptionByLongFlag(t *testing.T) { - var opts struct { - Testing bool `long:"testing" description:"Testing"` - } - - p := NewParser(&opts, Default) - opt := p.FindOptionByLongName("testing") - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - assertString(t, opt.LongName, "testing") -} - -func TestFindOptionByShortFlag(t *testing.T) { - var opts struct { - Testing bool `short:"t" description:"Testing"` - } - - p := NewParser(&opts, Default) - opt := p.FindOptionByShortName('t') - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - if opt.ShortName != 't' { - t.Errorf("Expected 't', but got %v", opt.ShortName) - } -} - -func TestFindOptionByLongFlagInSubGroup(t *testing.T) { - var opts struct { - Group struct { - Testing bool `long:"testing" description:"Testing"` - } `group:"sub-group"` - } - - p := NewParser(&opts, Default) - opt := p.FindOptionByLongName("testing") - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - assertString(t, opt.LongName, "testing") -} - -func TestFindOptionByShortFlagInSubGroup(t *testing.T) { - var opts struct { - Group struct { - Testing bool `short:"t" description:"Testing"` - } `group:"sub-group"` - } - - p := NewParser(&opts, Default) - opt := p.FindOptionByShortName('t') - - if opt == nil { - t.Errorf("Expected option, but found none") - } - - if opt.ShortName != 't' { - t.Errorf("Expected 't', but got %v", opt.ShortName) - } -} diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go deleted file mode 100644 index d380305..0000000 --- a/vendor/github.com/jessevdk/go-flags/help.go +++ /dev/null @@ -1,491 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package flags - -import ( - "bufio" - "bytes" - "fmt" - "io" - "runtime" - "strings" - "unicode/utf8" -) - -type alignmentInfo struct { - maxLongLen int - hasShort bool - hasValueName bool - terminalColumns int - indent bool -} - -const ( - paddingBeforeOption = 2 - distanceBetweenOptionAndDescription = 2 -) - -func (a *alignmentInfo) descriptionStart() int { - ret := a.maxLongLen + distanceBetweenOptionAndDescription - - if a.hasShort { - ret += 2 - } - - if a.maxLongLen > 0 { - ret += 4 - } - - if a.hasValueName { - ret += 3 - } - - return ret -} - -func (a *alignmentInfo) updateLen(name string, indent bool) { - l := utf8.RuneCountInString(name) - - if indent { - l = l + 4 - } - - if l > a.maxLongLen { - a.maxLongLen = l - } -} - -func (p *Parser) getAlignmentInfo() alignmentInfo { - ret := alignmentInfo{ - maxLongLen: 0, - hasShort: false, - hasValueName: false, - terminalColumns: getTerminalColumns(), - } - - if ret.terminalColumns <= 0 { - ret.terminalColumns = 80 - } - - var prevcmd *Command - - p.eachActiveGroup(func(c *Command, grp *Group) { - if c != prevcmd { - for _, arg := range c.args { - ret.updateLen(arg.Name, c != p.Command) - } - } - - for _, info := range grp.options { - if !info.canCli() { - continue - } - - if info.ShortName != 0 { - ret.hasShort = true - } - - if len(info.ValueName) > 0 { - ret.hasValueName = true - } - - l := info.LongNameWithNamespace() + info.ValueName - - if len(info.Choices) != 0 { - l += "[" + strings.Join(info.Choices, "|") + "]" - } - - ret.updateLen(l, c != p.Command) - } - }) - - return ret -} - -func wrapText(s string, l int, prefix string) string { - var ret string - - if l < 10 { - l = 10 - } - - // Basic text wrapping of s at spaces to fit in l - lines := strings.Split(s, "\n") - - for _, line := range lines { - var retline string - - line = strings.TrimSpace(line) - - for len(line) > l { - // Try to split on space - suffix := "" - - pos := strings.LastIndex(line[:l], " ") - - if pos < 0 { - pos = l - 1 - suffix = "-\n" - } - - if len(retline) != 0 { - retline += "\n" + prefix - } - - retline += strings.TrimSpace(line[:pos]) + suffix - line = strings.TrimSpace(line[pos:]) - } - - if len(line) > 0 { - if len(retline) != 0 { - retline += "\n" + prefix - } - - retline += line - } - - if len(ret) > 0 { - ret += "\n" - - if len(retline) > 0 { - ret += prefix - } - } - - ret += retline - } - - return ret -} - -func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) { - line := &bytes.Buffer{} - - prefix := paddingBeforeOption - - if info.indent { - prefix += 4 - } - - if option.Hidden { - return - } - - line.WriteString(strings.Repeat(" ", prefix)) - - if option.ShortName != 0 { - line.WriteRune(defaultShortOptDelimiter) - line.WriteRune(option.ShortName) - } else if info.hasShort { - line.WriteString(" ") - } - - descstart := info.descriptionStart() + paddingBeforeOption - - if len(option.LongName) > 0 { - if option.ShortName != 0 { - line.WriteString(", ") - } else if info.hasShort { - line.WriteString(" ") - } - - line.WriteString(defaultLongOptDelimiter) - line.WriteString(option.LongNameWithNamespace()) - } - - if option.canArgument() { - line.WriteRune(defaultNameArgDelimiter) - - if len(option.ValueName) > 0 { - line.WriteString(option.ValueName) - } - - if len(option.Choices) > 0 { - line.WriteString("[" + strings.Join(option.Choices, "|") + "]") - } - } - - written := line.Len() - line.WriteTo(writer) - - if option.Description != "" { - dw := descstart - written - 
writer.WriteString(strings.Repeat(" ", dw)) - - var def string - - if len(option.DefaultMask) != 0 { - if option.DefaultMask != "-" { - def = option.DefaultMask - } - } else { - def = option.defaultLiteral - } - - var envDef string - if option.EnvDefaultKey != "" { - var envPrintable string - if runtime.GOOS == "windows" { - envPrintable = "%" + option.EnvDefaultKey + "%" - } else { - envPrintable = "$" + option.EnvDefaultKey - } - envDef = fmt.Sprintf(" [%s]", envPrintable) - } - - var desc string - - if def != "" { - desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef) - } else { - desc = option.Description + envDef - } - - writer.WriteString(wrapText(desc, - info.terminalColumns-descstart, - strings.Repeat(" ", descstart))) - } - - writer.WriteString("\n") -} - -func maxCommandLength(s []*Command) int { - if len(s) == 0 { - return 0 - } - - ret := len(s[0].Name) - - for _, v := range s[1:] { - l := len(v.Name) - - if l > ret { - ret = l - } - } - - return ret -} - -// WriteHelp writes a help message containing all the possible options and -// their descriptions to the provided writer. Note that the HelpFlag parser -// option provides a convenient way to add a -h/--help option group to the -// command line parser which will automatically show the help messages using -// this method. -func (p *Parser) WriteHelp(writer io.Writer) { - if writer == nil { - return - } - - wr := bufio.NewWriter(writer) - aligninfo := p.getAlignmentInfo() - - cmd := p.Command - - for cmd.Active != nil { - cmd = cmd.Active - } - - if p.Name != "" { - wr.WriteString("Usage:\n") - wr.WriteString(" ") - - allcmd := p.Command - - for allcmd != nil { - var usage string - - if allcmd == p.Command { - if len(p.Usage) != 0 { - usage = p.Usage - } else if p.Options&HelpFlag != 0 { - usage = "[OPTIONS]" - } - } else if us, ok := allcmd.data.(Usage); ok { - usage = us.Usage() - } else if allcmd.hasCliOptions() { - usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name) - } - - if len(usage) != 0 { - fmt.Fprintf(wr, " %s %s", allcmd.Name, usage) - } else { - fmt.Fprintf(wr, " %s", allcmd.Name) - } - - if len(allcmd.args) > 0 { - fmt.Fprintf(wr, " ") - } - - for i, arg := range allcmd.args { - if i != 0 { - fmt.Fprintf(wr, " ") - } - - name := arg.Name - - if arg.isRemaining() { - name = name + "..." 
- } - - if !allcmd.ArgsRequired { - fmt.Fprintf(wr, "[%s]", name) - } else { - fmt.Fprintf(wr, "%s", name) - } - } - - if allcmd.Active == nil && len(allcmd.commands) > 0 { - var co, cc string - - if allcmd.SubcommandsOptional { - co, cc = "[", "]" - } else { - co, cc = "<", ">" - } - - visibleCommands := allcmd.visibleCommands() - - if len(visibleCommands) > 3 { - fmt.Fprintf(wr, " %scommand%s", co, cc) - } else { - subcommands := allcmd.sortedVisibleCommands() - names := make([]string, len(subcommands)) - - for i, subc := range subcommands { - names[i] = subc.Name - } - - fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc) - } - } - - allcmd = allcmd.Active - } - - fmt.Fprintln(wr) - - if len(cmd.LongDescription) != 0 { - fmt.Fprintln(wr) - - t := wrapText(cmd.LongDescription, - aligninfo.terminalColumns, - "") - - fmt.Fprintln(wr, t) - } - } - - c := p.Command - - for c != nil { - printcmd := c != p.Command - - c.eachGroup(func(grp *Group) { - first := true - - // Skip built-in help group for all commands except the top-level - // parser - if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) { - return - } - - for _, info := range grp.options { - if !info.canCli() || info.Hidden { - continue - } - - if printcmd { - fmt.Fprintf(wr, "\n[%s command options]\n", c.Name) - aligninfo.indent = true - printcmd = false - } - - if first && cmd.Group != grp { - fmt.Fprintln(wr) - - if aligninfo.indent { - wr.WriteString(" ") - } - - fmt.Fprintf(wr, "%s:\n", grp.ShortDescription) - first = false - } - - p.writeHelpOption(wr, info, aligninfo) - } - }) - - var args []*Arg - for _, arg := range c.args { - if arg.Description != "" { - args = append(args, arg) - } - } - - if len(args) > 0 { - if c == p.Command { - fmt.Fprintf(wr, "\nArguments:\n") - } else { - fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name) - } - - descStart := aligninfo.descriptionStart() + paddingBeforeOption - - for _, arg := range args { - argPrefix := strings.Repeat(" ", paddingBeforeOption) - argPrefix += arg.Name - - if len(arg.Description) > 0 { - argPrefix += ":" - wr.WriteString(argPrefix) - - // Space between "arg:" and the description start - descPadding := strings.Repeat(" ", descStart-len(argPrefix)) - // How much space the description gets before wrapping - descWidth := aligninfo.terminalColumns - 1 - descStart - // Whitespace to which we can indent new description lines - descPrefix := strings.Repeat(" ", descStart) - - wr.WriteString(descPadding) - wr.WriteString(wrapText(arg.Description, descWidth, descPrefix)) - } else { - wr.WriteString(argPrefix) - } - - fmt.Fprintln(wr) - } - } - - c = c.Active - } - - scommands := cmd.sortedVisibleCommands() - - if len(scommands) > 0 { - maxnamelen := maxCommandLength(scommands) - - fmt.Fprintln(wr) - fmt.Fprintln(wr, "Available commands:") - - for _, c := range scommands { - fmt.Fprintf(wr, " %s", c.Name) - - if len(c.ShortDescription) > 0 { - pad := strings.Repeat(" ", maxnamelen-len(c.Name)) - fmt.Fprintf(wr, "%s %s", pad, c.ShortDescription) - - if len(c.Aliases) > 0 { - fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", ")) - } - - } - - fmt.Fprintln(wr) - } - } - - wr.Flush() -} diff --git a/vendor/github.com/jessevdk/go-flags/help_test.go b/vendor/github.com/jessevdk/go-flags/help_test.go deleted file mode 100644 index bb76640..0000000 --- a/vendor/github.com/jessevdk/go-flags/help_test.go +++ /dev/null @@ -1,538 +0,0 @@ -package flags - -import ( - "bufio" - "bytes" - "fmt" - "os" - "runtime" - "strings" - "testing" - "time" -) - -type 
helpOptions struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information" ini-name:"verbose"` - Call func(string) `short:"c" description:"Call phone number" ini-name:"call"` - PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` - EmptyDescription bool `long:"empty-description"` - - Default string `long:"default" default:"Some\nvalue" description:"Test default value"` - DefaultArray []string `long:"default-array" default:"Some value" default:"Other\tvalue" description:"Test default array value"` - DefaultMap map[string]string `long:"default-map" default:"some:value" default:"another:value" description:"Testdefault map value"` - EnvDefault1 string `long:"env-default1" default:"Some value" env:"ENV_DEFAULT" description:"Test env-default1 value"` - EnvDefault2 string `long:"env-default2" env:"ENV_DEFAULT" description:"Test env-default2 value"` - OptionWithArgName string `long:"opt-with-arg-name" value-name:"something" description:"Option with named argument"` - OptionWithChoices string `long:"opt-with-choices" value-name:"choice" choice:"dog" choice:"cat" description:"Option with choices"` - Hidden string `long:"hidden" description:"Hidden option" hidden:"yes"` - - OnlyIni string `ini-name:"only-ini" description:"Option only available in ini"` - - Other struct { - StringSlice []string `short:"s" default:"some" default:"value" description:"A slice of strings"` - IntMap map[string]int `long:"intmap" default:"a:1" description:"A map from string to int" ini-name:"int-map"` - } `group:"Other Options"` - - HiddenGroup struct { - InsideHiddenGroup string `long:"inside-hidden-group" description:"Inside hidden group"` - } `group:"Hidden group" hidden:"yes"` - - Group struct { - Opt string `long:"opt" description:"This is a subgroup option"` - HiddenInsideGroup string `long:"hidden-inside-group" description:"Hidden inside group" hidden:"yes"` - NotHiddenInsideGroup string `long:"not-hidden-inside-group" description:"Not hidden inside group" hidden:"false"` - - Group struct { - Opt string `long:"opt" description:"This is a subsubgroup option"` - } `group:"Subsubgroup" namespace:"sap"` - } `group:"Subgroup" namespace:"sip"` - - Command struct { - ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"` - } `command:"command" alias:"cm" alias:"cmd" description:"A command"` - - HiddenCommand struct { - ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"` - } `command:"hidden-command" description:"A hidden command" hidden:"yes"` - - Args struct { - Filename string `positional-arg-name:"filename" description:"A filename with a long description to trigger line wrapping"` - Number int `positional-arg-name:"num" description:"A number"` - HiddenInHelp float32 `positional-arg-name:"hidden-in-help" required:"yes"` - } `positional-args:"yes"` -} - -func TestHelp(t *testing.T) { - oldEnv := EnvSnapshot() - defer oldEnv.Restore() - os.Setenv("ENV_DEFAULT", "env-def") - - var opts helpOptions - p := NewNamedParser("TestHelp", HelpFlag) - p.AddGroup("Application Options", "The application options", &opts) - - _, err := p.ParseArgs([]string{"--help"}) - - if err == nil { - t.Fatalf("Expected help error") - } - - if e, ok := err.(*Error); !ok { - t.Fatalf("Expected flags.Error, but got %T", err) - } else { - if e.Type != ErrHelp { - t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) - } - - var expected string - - if runtime.GOOS == "windows" { - expected = `Usage: - TestHelp [OPTIONS] [filename] 
[num] [hidden-in-help] - -Application Options: - /v, /verbose Show verbose debug information - /c: Call phone number - /ptrslice: A slice of pointers to string - /empty-description - /default: Test default value (default: - "Some\nvalue") - /default-array: Test default array value (default: - Some value, "Other\tvalue") - /default-map: Testdefault map value (default: - some:value, another:value) - /env-default1: Test env-default1 value (default: - Some value) [%ENV_DEFAULT%] - /env-default2: Test env-default2 value - [%ENV_DEFAULT%] - /opt-with-arg-name:something Option with named argument - /opt-with-choices:choice[dog|cat] Option with choices - -Other Options: - /s: A slice of strings (default: some, - value) - /intmap: A map from string to int (default: - a:1) - -Subgroup: - /sip.opt: This is a subgroup option - /sip.not-hidden-inside-group: Not hidden inside group - -Subsubgroup: - /sip.sap.opt: This is a subsubgroup option - -Help Options: - /? Show this help message - /h, /help Show this help message - -Arguments: - filename: A filename with a long description - to trigger line wrapping - num: A number - -Available commands: - command A command (aliases: cm, cmd) -` - } else { - expected = `Usage: - TestHelp [OPTIONS] [filename] [num] [hidden-in-help] - -Application Options: - -v, --verbose Show verbose debug information - -c= Call phone number - --ptrslice= A slice of pointers to string - --empty-description - --default= Test default value (default: - "Some\nvalue") - --default-array= Test default array value (default: - Some value, "Other\tvalue") - --default-map= Testdefault map value (default: - some:value, another:value) - --env-default1= Test env-default1 value (default: - Some value) [$ENV_DEFAULT] - --env-default2= Test env-default2 value - [$ENV_DEFAULT] - --opt-with-arg-name=something Option with named argument - --opt-with-choices=choice[dog|cat] Option with choices - -Other Options: - -s= A slice of strings (default: some, - value) - --intmap= A map from string to int (default: - a:1) - -Subgroup: - --sip.opt= This is a subgroup option - --sip.not-hidden-inside-group= Not hidden inside group - -Subsubgroup: - --sip.sap.opt= This is a subsubgroup option - -Help Options: - -h, --help Show this help message - -Arguments: - filename: A filename with a long description - to trigger line wrapping - num: A number - -Available commands: - command A command (aliases: cm, cmd) -` - } - - assertDiff(t, e.Message, expected, "help message") - } -} - -func TestMan(t *testing.T) { - oldEnv := EnvSnapshot() - defer oldEnv.Restore() - os.Setenv("ENV_DEFAULT", "env-def") - - var opts helpOptions - p := NewNamedParser("TestMan", HelpFlag) - p.ShortDescription = "Test manpage generation" - p.LongDescription = "This is a somewhat `longer' description of what this does" - p.AddGroup("Application Options", "The application options", &opts) - - p.Commands()[0].LongDescription = "Longer `command' description" - - var buf bytes.Buffer - p.WriteManPage(&buf) - - got := buf.String() - - tt := time.Now() - - var envDefaultName string - - if runtime.GOOS == "windows" { - envDefaultName = "%ENV_DEFAULT%" - } else { - envDefaultName = "$ENV_DEFAULT" - } - - expected := fmt.Sprintf(`.TH TestMan 1 "%s" -.SH NAME -TestMan \- Test manpage generation -.SH SYNOPSIS -\fBTestMan\fP [OPTIONS] -.SH DESCRIPTION -This is a somewhat \fBlonger\fP description of what this does -.SH OPTIONS -.SS Application Options -The application options -.TP -\fB\fB\-v\fR, \fB\-\-verbose\fR\fP -Show verbose debug information -.TP 
-\fB\fB\-c\fR\fP -Call phone number -.TP -\fB\fB\-\-ptrslice\fR\fP -A slice of pointers to string -.TP -\fB\fB\-\-empty-description\fR\fP -.TP -\fB\fB\-\-default\fR \fP -Test default value -.TP -\fB\fB\-\-default-array\fR \fP -Test default array value -.TP -\fB\fB\-\-default-map\fR \fP -Testdefault map value -.TP -\fB\fB\-\-env-default1\fR \fP -Test env-default1 value -.TP -\fB\fB\-\-env-default2\fR \fP -Test env-default2 value -.TP -\fB\fB\-\-opt-with-arg-name\fR \fIsomething\fR\fP -Option with named argument -.TP -\fB\fB\-\-opt-with-choices\fR \fIchoice\fR\fP -Option with choices -.SS Other Options -.TP -\fB\fB\-s\fR \fP -A slice of strings -.TP -\fB\fB\-\-intmap\fR \fP -A map from string to int -.SS Subgroup -.TP -\fB\fB\-\-sip.opt\fR\fP -This is a subgroup option -.TP -\fB\fB\-\-sip.not-hidden-inside-group\fR\fP -Not hidden inside group -.SS Subsubgroup -.TP -\fB\fB\-\-sip.sap.opt\fR\fP -This is a subsubgroup option -.SH COMMANDS -.SS command -A command - -Longer \fBcommand\fP description - -\fBUsage\fP: TestMan [OPTIONS] command [command-OPTIONS] -.TP - -\fBAliases\fP: cm, cmd - -.TP -\fB\fB\-\-extra-verbose\fR\fP -Use for extra verbosity -`, tt.Format("2 January 2006"), envDefaultName) - - assertDiff(t, got, expected, "man page") -} - -type helpCommandNoOptions struct { - Command struct { - } `command:"command" description:"A command"` -} - -func TestHelpCommand(t *testing.T) { - oldEnv := EnvSnapshot() - defer oldEnv.Restore() - os.Setenv("ENV_DEFAULT", "env-def") - - var opts helpCommandNoOptions - p := NewNamedParser("TestHelpCommand", HelpFlag) - p.AddGroup("Application Options", "The application options", &opts) - - _, err := p.ParseArgs([]string{"command", "--help"}) - - if err == nil { - t.Fatalf("Expected help error") - } - - if e, ok := err.(*Error); !ok { - t.Fatalf("Expected flags.Error, but got %T", err) - } else { - if e.Type != ErrHelp { - t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) - } - - var expected string - - if runtime.GOOS == "windows" { - expected = `Usage: - TestHelpCommand [OPTIONS] command - -Help Options: - /? Show this help message - /h, /help Show this help message -` - } else { - expected = `Usage: - TestHelpCommand [OPTIONS] command - -Help Options: - -h, --help Show this help message -` - } - - assertDiff(t, e.Message, expected, "help message") - } -} - -func TestHelpDefaults(t *testing.T) { - var expected string - - if runtime.GOOS == "windows" { - expected = `Usage: - TestHelpDefaults [OPTIONS] - -Application Options: - /with-default: With default (default: default-value) - /without-default: Without default - /with-programmatic-default: With programmatic default (default: - default-value) - -Help Options: - /? 
Show this help message - /h, /help Show this help message -` - } else { - expected = `Usage: - TestHelpDefaults [OPTIONS] - -Application Options: - --with-default= With default (default: default-value) - --without-default= Without default - --with-programmatic-default= With programmatic default (default: - default-value) - -Help Options: - -h, --help Show this help message -` - } - - tests := []struct { - Args []string - Output string - }{ - { - Args: []string{"-h"}, - Output: expected, - }, - { - Args: []string{"--with-default", "other-value", "--with-programmatic-default", "other-value", "-h"}, - Output: expected, - }, - } - - for _, test := range tests { - var opts struct { - WithDefault string `long:"with-default" default:"default-value" description:"With default"` - WithoutDefault string `long:"without-default" description:"Without default"` - WithProgrammaticDefault string `long:"with-programmatic-default" description:"With programmatic default"` - } - - opts.WithProgrammaticDefault = "default-value" - - p := NewNamedParser("TestHelpDefaults", HelpFlag) - p.AddGroup("Application Options", "The application options", &opts) - - _, err := p.ParseArgs(test.Args) - - if err == nil { - t.Fatalf("Expected help error") - } - - if e, ok := err.(*Error); !ok { - t.Fatalf("Expected flags.Error, but got %T", err) - } else { - if e.Type != ErrHelp { - t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) - } - - assertDiff(t, e.Message, test.Output, "help message") - } - } -} - -func TestHelpRestArgs(t *testing.T) { - opts := struct { - Verbose bool `short:"v"` - }{} - - p := NewNamedParser("TestHelpDefaults", HelpFlag) - p.AddGroup("Application Options", "The application options", &opts) - - retargs, err := p.ParseArgs([]string{"-h", "-v", "rest"}) - - if err == nil { - t.Fatalf("Expected help error") - } - - assertStringArray(t, retargs, []string{"-v", "rest"}) -} - -func TestWrapText(t *testing.T) { - s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." - - got := wrapText(s, 60, " ") - expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit, - sed do eiusmod tempor incididunt ut labore et dolore magna - aliqua. Ut enim ad minim veniam, quis nostrud exercitation - ullamco laboris nisi ut aliquip ex ea commodo consequat. - Duis aute irure dolor in reprehenderit in voluptate velit - esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint - occaecat cupidatat non proident, sunt in culpa qui officia - deserunt mollit anim id est laborum.` - - assertDiff(t, got, expected, "wrapped text") -} - -func TestWrapParagraph(t *testing.T) { - s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n\n" - s += "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\n\n" - s += "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\n\n" - s += "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n" - - got := wrapText(s, 60, " ") - expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit, - sed do eiusmod tempor incididunt ut labore et dolore magna - aliqua. - - Ut enim ad minim veniam, quis nostrud exercitation ullamco - laboris nisi ut aliquip ex ea commodo consequat. - - Duis aute irure dolor in reprehenderit in voluptate velit - esse cillum dolore eu fugiat nulla pariatur. - - Excepteur sint occaecat cupidatat non proident, sunt in - culpa qui officia deserunt mollit anim id est laborum. -` - - assertDiff(t, got, expected, "wrapped paragraph") -} - -func TestHelpDefaultMask(t *testing.T) { - var tests = []struct { - opts interface{} - present string - }{ - { - opts: &struct { - Value string `short:"v" default:"123" description:"V"` - }{}, - present: "V (default: 123)\n", - }, - { - opts: &struct { - Value string `short:"v" default:"123" default-mask:"abc" description:"V"` - }{}, - present: "V (default: abc)\n", - }, - { - opts: &struct { - Value string `short:"v" default:"123" default-mask:"-" description:"V"` - }{}, - present: "V\n", - }, - { - opts: &struct { - Value string `short:"v" description:"V"` - }{Value: "123"}, - present: "V (default: 123)\n", - }, - { - opts: &struct { - Value string `short:"v" default-mask:"abc" description:"V"` - }{Value: "123"}, - present: "V (default: abc)\n", - }, - { - opts: &struct { - Value string `short:"v" default-mask:"-" description:"V"` - }{Value: "123"}, - present: "V\n", - }, - } - - for _, test := range tests { - p := NewParser(test.opts, HelpFlag) - _, err := p.ParseArgs([]string{"-h"}) - if flagsErr, ok := err.(*Error); ok && flagsErr.Type == ErrHelp { - err = nil - } - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - h := &bytes.Buffer{} - w := bufio.NewWriter(h) - p.writeHelpOption(w, p.FindOptionByShortName('v'), p.getAlignmentInfo()) - w.Flush() - if strings.Index(h.String(), test.present) < 0 { - t.Errorf("Not present %q\n%s", test.present, h.String()) - } - } -} diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go deleted file mode 100644 index e714d3d..0000000 --- a/vendor/github.com/jessevdk/go-flags/ini.go +++ /dev/null @@ -1,597 +0,0 @@ -package flags - -import ( - "bufio" - "fmt" - "io" - "os" - "reflect" - "sort" - "strconv" - "strings" -) - -// IniError contains location information on where an error occurred. -type IniError struct { - // The error message. - Message string - - // The filename of the file in which the error occurred. - File string - - // The line number at which the error occurred. - LineNumber uint -} - -// Error provides a "file:line: message" formatted message of the ini error. 
-func (x *IniError) Error() string { - return fmt.Sprintf( - "%s:%d: %s", - x.File, - x.LineNumber, - x.Message, - ) -} - -// IniOptions for writing -type IniOptions uint - -const ( - // IniNone indicates no options. - IniNone IniOptions = 0 - - // IniIncludeDefaults indicates that default values should be written. - IniIncludeDefaults = 1 << iota - - // IniCommentDefaults indicates that if IniIncludeDefaults is used - // options with default values are written but commented out. - IniCommentDefaults - - // IniIncludeComments indicates that comments containing the description - // of an option should be written. - IniIncludeComments - - // IniDefault provides a default set of options. - IniDefault = IniIncludeComments -) - -// IniParser is a utility to read and write flags options from and to ini -// formatted strings. -type IniParser struct { - ParseAsDefaults bool // override default flags - - parser *Parser -} - -type iniValue struct { - Name string - Value string - Quoted bool - LineNumber uint -} - -type iniSection []iniValue - -type ini struct { - File string - Sections map[string]iniSection -} - -// NewIniParser creates a new ini parser for a given Parser. -func NewIniParser(p *Parser) *IniParser { - return &IniParser{ - parser: p, - } -} - -// IniParse is a convenience function to parse command line options with default -// settings from an ini formatted file. The provided data is a pointer to a struct -// representing the default option group (named "Application Options"). For -// more control, use flags.NewParser. -func IniParse(filename string, data interface{}) error { - p := NewParser(data, Default) - - return NewIniParser(p).ParseFile(filename) -} - -// ParseFile parses flags from an ini formatted file. See Parse for more -// information on the ini file format. The returned errors can be of the type -// flags.Error or flags.IniError. -func (i *IniParser) ParseFile(filename string) error { - ini, err := readIniFromFile(filename) - - if err != nil { - return err - } - - return i.parse(ini) -} - -// Parse parses flags from an ini format. You can use ParseFile as a -// convenience function to parse from a filename instead of a general -// io.Reader. -// -// The format of the ini file is as follows: -// -// [Option group name] -// option = value -// -// Each section in the ini file represents an option group or command in the -// flags parser. The default flags parser option group (i.e. when using -// flags.Parse) is named 'Application Options'. The ini option name is matched -// in the following order: -// -// 1. Compared to the ini-name tag on the option struct field (if present) -// 2. Compared to the struct field name -// 3. Compared to the option long name (if present) -// 4. Compared to the option short name (if present) -// -// Sections for nested groups and commands can be addressed using a dot `.' -// namespacing notation (i.e [subcommand.Options]). Group section names are -// matched case insensitive. -// -// The returned errors can be of the type flags.Error or flags.IniError. -func (i *IniParser) Parse(reader io.Reader) error { - ini, err := readIni(reader, "") - - if err != nil { - return err - } - - return i.parse(ini) -} - -// WriteFile writes the flags as ini format into a file. See Write -// for more information. The returned error occurs when the specified file -// could not be opened for writing. 
-func (i *IniParser) WriteFile(filename string, options IniOptions) error { - return writeIniToFile(i, filename, options) -} - -// Write writes the current values of all the flags to an ini format. -// See Parse for more information on the ini file format. You typically -// call this only after settings have been parsed since the default values of each -// option are stored just before parsing the flags (this is only relevant when -// IniIncludeDefaults is _not_ set in options). -func (i *IniParser) Write(writer io.Writer, options IniOptions) { - writeIni(i, writer, options) -} - -func readFullLine(reader *bufio.Reader) (string, error) { - var line []byte - - for { - l, more, err := reader.ReadLine() - - if err != nil { - return "", err - } - - if line == nil && !more { - return string(l), nil - } - - line = append(line, l...) - - if !more { - break - } - } - - return string(line), nil -} - -func optionIniName(option *Option) string { - name := option.tag.Get("_read-ini-name") - - if len(name) != 0 { - return name - } - - name = option.tag.Get("ini-name") - - if len(name) != 0 { - return name - } - - return option.field.Name -} - -func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) { - var sname string - - if len(namespace) != 0 { - sname = namespace - } - - if cmd.Group != group && len(group.ShortDescription) != 0 { - if len(sname) != 0 { - sname += "." - } - - sname += group.ShortDescription - } - - sectionwritten := false - comments := (options & IniIncludeComments) != IniNone - - for _, option := range group.options { - if option.isFunc() || option.Hidden { - continue - } - - if len(option.tag.Get("no-ini")) != 0 { - continue - } - - val := option.value - - if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() { - continue - } - - if !sectionwritten { - fmt.Fprintf(writer, "[%s]\n", sname) - sectionwritten = true - } - - if comments && len(option.Description) != 0 { - fmt.Fprintf(writer, "; %s\n", option.Description) - } - - oname := optionIniName(option) - - commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault() - - kind := val.Type().Kind() - switch kind { - case reflect.Slice: - kind = val.Type().Elem().Kind() - - if val.Len() == 0 { - writeOption(writer, oname, kind, "", "", true, option.iniQuote) - } else { - for idx := 0; idx < val.Len(); idx++ { - v, _ := convertToString(val.Index(idx), option.tag) - - writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) - } - } - case reflect.Map: - kind = val.Type().Elem().Kind() - - if val.Len() == 0 { - writeOption(writer, oname, kind, "", "", true, option.iniQuote) - } else { - mkeys := val.MapKeys() - keys := make([]string, len(val.MapKeys())) - kkmap := make(map[string]reflect.Value) - - for i, k := range mkeys { - keys[i], _ = convertToString(k, option.tag) - kkmap[keys[i]] = k - } - - sort.Strings(keys) - - for _, k := range keys { - v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag) - - writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote) - } - } - default: - v, _ := convertToString(val, option.tag) - - writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) - } - - if comments { - fmt.Fprintln(writer) - } - } - - if sectionwritten && !comments { - fmt.Fprintln(writer) - } -} - -func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) 
{ - if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) { - optionValue = strconv.Quote(optionValue) - } - - comment := "" - if commentOption { - comment = "; " - } - - fmt.Fprintf(writer, "%s%s =", comment, optionName) - - if optionKey != "" { - fmt.Fprintf(writer, " %s:%s", optionKey, optionValue) - } else if optionValue != "" { - fmt.Fprintf(writer, " %s", optionValue) - } - - fmt.Fprintln(writer) -} - -func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) { - command.eachGroup(func(group *Group) { - if !group.Hidden { - writeGroupIni(command, group, namespace, writer, options) - } - }) - - for _, c := range command.commands { - var nns string - - if c.Hidden { - continue - } - - if len(namespace) != 0 { - nns = c.Name + "." + nns - } else { - nns = c.Name - } - - writeCommandIni(c, nns, writer, options) - } -} - -func writeIni(parser *IniParser, writer io.Writer, options IniOptions) { - writeCommandIni(parser.parser.Command, "", writer, options) -} - -func writeIniToFile(parser *IniParser, filename string, options IniOptions) error { - file, err := os.Create(filename) - - if err != nil { - return err - } - - defer file.Close() - - writeIni(parser, file, options) - - return nil -} - -func readIniFromFile(filename string) (*ini, error) { - file, err := os.Open(filename) - - if err != nil { - return nil, err - } - - defer file.Close() - - return readIni(file, filename) -} - -func readIni(contents io.Reader, filename string) (*ini, error) { - ret := &ini{ - File: filename, - Sections: make(map[string]iniSection), - } - - reader := bufio.NewReader(contents) - - // Empty global section - section := make(iniSection, 0, 10) - sectionname := "" - - ret.Sections[sectionname] = section - - var lineno uint - - for { - line, err := readFullLine(reader) - - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - lineno++ - line = strings.TrimSpace(line) - - // Skip empty lines and lines starting with ; (comments) - if len(line) == 0 || line[0] == ';' || line[0] == '#' { - continue - } - - if line[0] == '[' { - if line[0] != '[' || line[len(line)-1] != ']' { - return nil, &IniError{ - Message: "malformed section header", - File: filename, - LineNumber: lineno, - } - } - - name := strings.TrimSpace(line[1 : len(line)-1]) - - if len(name) == 0 { - return nil, &IniError{ - Message: "empty section name", - File: filename, - LineNumber: lineno, - } - } - - sectionname = name - section = ret.Sections[name] - - if section == nil { - section = make(iniSection, 0, 10) - ret.Sections[name] = section - } - - continue - } - - // Parse option here - keyval := strings.SplitN(line, "=", 2) - - if len(keyval) != 2 { - return nil, &IniError{ - Message: fmt.Sprintf("malformed key=value (%s)", line), - File: filename, - LineNumber: lineno, - } - } - - name := strings.TrimSpace(keyval[0]) - value := strings.TrimSpace(keyval[1]) - quoted := false - - if len(value) != 0 && value[0] == '"' { - if v, err := strconv.Unquote(value); err == nil { - value = v - - quoted = true - } else { - return nil, &IniError{ - Message: err.Error(), - File: filename, - LineNumber: lineno, - } - } - } - - section = append(section, iniValue{ - Name: name, - Value: value, - Quoted: quoted, - LineNumber: lineno, - }) - - ret.Sections[sectionname] = section - } - - return ret, nil -} - -func (i *IniParser) matchingGroups(name string) []*Group { - if len(name) == 0 { - var ret []*Group - - i.parser.eachGroup(func(g *Group) { - ret = append(ret, g) - }) - - return 
ret - } - - g := i.parser.groupByName(name) - - if g != nil { - return []*Group{g} - } - - return nil -} - -func (i *IniParser) parse(ini *ini) error { - p := i.parser - - var quotesLookup = make(map[*Option]bool) - - for name, section := range ini.Sections { - groups := i.matchingGroups(name) - - if len(groups) == 0 { - return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name) - } - - for _, inival := range section { - var opt *Option - - for _, group := range groups { - opt = group.optionByName(inival.Name, func(o *Option, n string) bool { - return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n) - }) - - if opt != nil && len(opt.tag.Get("no-ini")) != 0 { - opt = nil - } - - if opt != nil { - break - } - } - - if opt == nil { - if (p.Options & IgnoreUnknown) == None { - return &IniError{ - Message: fmt.Sprintf("unknown option: %s", inival.Name), - File: ini.File, - LineNumber: inival.LineNumber, - } - } - - continue - } - - // ini value is ignored if override is set and - // value was previously set from non default - if i.ParseAsDefaults && !opt.isSetDefault { - continue - } - - pval := &inival.Value - - if !opt.canArgument() && len(inival.Value) == 0 { - pval = nil - } else { - if opt.value.Type().Kind() == reflect.Map { - parts := strings.SplitN(inival.Value, ":", 2) - - // only handle unquoting - if len(parts) == 2 && parts[1][0] == '"' { - if v, err := strconv.Unquote(parts[1]); err == nil { - parts[1] = v - - inival.Quoted = true - } else { - return &IniError{ - Message: err.Error(), - File: ini.File, - LineNumber: inival.LineNumber, - } - } - - s := parts[0] + ":" + parts[1] - - pval = &s - } - } - } - - if err := opt.set(pval); err != nil { - return &IniError{ - Message: err.Error(), - File: ini.File, - LineNumber: inival.LineNumber, - } - } - - // either all INI values are quoted or only values who need quoting - if _, ok := quotesLookup[opt]; !inival.Quoted || !ok { - quotesLookup[opt] = inival.Quoted - } - - opt.tag.Set("_read-ini-name", inival.Name) - } - } - - for opt, quoted := range quotesLookup { - opt.iniQuote = quoted - } - - return nil -} diff --git a/vendor/github.com/jessevdk/go-flags/ini_test.go b/vendor/github.com/jessevdk/go-flags/ini_test.go deleted file mode 100644 index ad4852e..0000000 --- a/vendor/github.com/jessevdk/go-flags/ini_test.go +++ /dev/null @@ -1,1053 +0,0 @@ -package flags - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func TestWriteIni(t *testing.T) { - oldEnv := EnvSnapshot() - defer oldEnv.Restore() - os.Setenv("ENV_DEFAULT", "env-def") - - var tests = []struct { - args []string - options IniOptions - expected string - }{ - { - []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "3.14", "command"}, - IniDefault, - `[Application Options] -; Show verbose debug information -verbose = true -verbose = true - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -[Other Options] -; A map from string to int -int-map = a:2 -int-map = b:3 - -`, - }, - { - []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "3.14", "command"}, - IniDefault | IniIncludeDefaults, - `[Application Options] -; Show verbose debug information -verbose = true -verbose = true - -; A slice of pointers to string -; PtrSlice = - -EmptyDescription = false - -; Test default value -Default = "Some\nvalue" - -; Test default array value -DefaultArray = Some value -DefaultArray = "Other\tvalue" - -; Testdefault map value -DefaultMap 
= another:value -DefaultMap = some:value - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -; Option with named argument -OptionWithArgName = - -; Option with choices -OptionWithChoices = - -; Option only available in ini -only-ini = - -[Other Options] -; A slice of strings -StringSlice = some -StringSlice = value - -; A map from string to int -int-map = a:2 -int-map = b:3 - -[Subgroup] -; This is a subgroup option -Opt = - -; Not hidden inside group -NotHiddenInsideGroup = - -[Subsubgroup] -; This is a subsubgroup option -Opt = - -[command] -; Use for extra verbosity -; ExtraVerbose = - -`, - }, - { - []string{"filename", "0", "3.14", "command"}, - IniDefault | IniIncludeDefaults | IniCommentDefaults, - `[Application Options] -; Show verbose debug information -; verbose = - -; A slice of pointers to string -; PtrSlice = - -; EmptyDescription = false - -; Test default value -; Default = "Some\nvalue" - -; Test default array value -; DefaultArray = Some value -; DefaultArray = "Other\tvalue" - -; Testdefault map value -; DefaultMap = another:value -; DefaultMap = some:value - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -; Option with named argument -; OptionWithArgName = - -; Option with choices -; OptionWithChoices = - -; Option only available in ini -; only-ini = - -[Other Options] -; A slice of strings -; StringSlice = some -; StringSlice = value - -; A map from string to int -; int-map = a:1 - -[Subgroup] -; This is a subgroup option -; Opt = - -; Not hidden inside group -; NotHiddenInsideGroup = - -[Subsubgroup] -; This is a subsubgroup option -; Opt = - -[command] -; Use for extra verbosity -; ExtraVerbose = - -`, - }, - { - []string{"--default=New value", "--default-array=New value", "--default-map=new:value", "filename", "0", "3.14", "command"}, - IniDefault | IniIncludeDefaults | IniCommentDefaults, - `[Application Options] -; Show verbose debug information -; verbose = - -; A slice of pointers to string -; PtrSlice = - -; EmptyDescription = false - -; Test default value -Default = New value - -; Test default array value -DefaultArray = New value - -; Testdefault map value -DefaultMap = new:value - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -; Option with named argument -; OptionWithArgName = - -; Option with choices -; OptionWithChoices = - -; Option only available in ini -; only-ini = - -[Other Options] -; A slice of strings -; StringSlice = some -; StringSlice = value - -; A map from string to int -; int-map = a:1 - -[Subgroup] -; This is a subgroup option -; Opt = - -; Not hidden inside group -; NotHiddenInsideGroup = - -[Subsubgroup] -; This is a subsubgroup option -; Opt = - -[command] -; Use for extra verbosity -; ExtraVerbose = - -`, - }, - } - - for _, test := range tests { - var opts helpOptions - - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - _, err := p.ParseArgs(test.args) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - inip := NewIniParser(p) - - var b bytes.Buffer - inip.Write(&b, test.options) - - got := b.String() - expected := test.expected - - msg := fmt.Sprintf("with arguments %+v and ini options %b", test.args, test.options) - assertDiff(t, got, expected, msg) - } -} - -func TestReadIni_flagEquivalent(t *testing.T) { - type options struct { - Opt1 bool `long:"opt1"` - - Group1 struct { - Opt2 bool 
`long:"opt2"` - } `group:"group1"` - - Group2 struct { - Opt3 bool `long:"opt3"` - } `group:"group2" namespace:"ns1"` - - Cmd1 struct { - Opt4 bool `long:"opt4"` - Opt5 bool `long:"foo.opt5"` - - Group1 struct { - Opt6 bool `long:"opt6"` - Opt7 bool `long:"foo.opt7"` - } `group:"group1"` - - Group2 struct { - Opt8 bool `long:"opt8"` - } `group:"group2" namespace:"ns1"` - } `command:"cmd1"` - } - - a := ` -opt1=true - -[group1] -opt2=true - -[group2] -ns1.opt3=true - -[cmd1] -opt4=true -foo.opt5=true - -[cmd1.group1] -opt6=true -foo.opt7=true - -[cmd1.group2] -ns1.opt8=true -` - b := ` -opt1=true -opt2=true -ns1.opt3=true - -[cmd1] -opt4=true -foo.opt5=true -opt6=true -foo.opt7=true -ns1.opt8=true -` - - parse := func(readIni string) (opts options, writeIni string) { - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - inip := NewIniParser(p) - err := inip.Parse(strings.NewReader(readIni)) - - if err != nil { - t.Fatalf("Unexpected error: %s\n\nFile:\n%s", err, readIni) - } - - var b bytes.Buffer - inip.Write(&b, Default) - - return opts, b.String() - } - - aOpt, aIni := parse(a) - bOpt, bIni := parse(b) - - assertDiff(t, aIni, bIni, "") - if !reflect.DeepEqual(aOpt, bOpt) { - t.Errorf("not equal") - } -} - -func TestReadIni(t *testing.T) { - var opts helpOptions - - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - inip := NewIniParser(p) - - inic := ` -; Show verbose debug information -verbose = true -verbose = true - -DefaultMap = another:"value\n1" -DefaultMap = some:value 2 - -[Application Options] -; A slice of pointers to string -; PtrSlice = - -; Test default value -Default = "New\nvalue" - -; Test env-default1 value -EnvDefault1 = New value - -[Other Options] -# A slice of strings -StringSlice = "some\nvalue" -StringSlice = another value - -; A map from string to int -int-map = a:2 -int-map = b:3 - -` - - b := strings.NewReader(inic) - err := inip.Parse(b) - - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - assertBoolArray(t, opts.Verbose, []bool{true, true}) - - if v := map[string]string{"another": "value\n1", "some": "value 2"}; !reflect.DeepEqual(opts.DefaultMap, v) { - t.Fatalf("Expected %#v for DefaultMap but got %#v", v, opts.DefaultMap) - } - - assertString(t, opts.Default, "New\nvalue") - - assertString(t, opts.EnvDefault1, "New value") - - assertStringArray(t, opts.Other.StringSlice, []string{"some\nvalue", "another value"}) - - if v, ok := opts.Other.IntMap["a"]; !ok { - t.Errorf("Expected \"a\" in Other.IntMap") - } else if v != 2 { - t.Errorf("Expected Other.IntMap[\"a\"] = 2, but got %v", v) - } - - if v, ok := opts.Other.IntMap["b"]; !ok { - t.Errorf("Expected \"b\" in Other.IntMap") - } else if v != 3 { - t.Errorf("Expected Other.IntMap[\"b\"] = 3, but got %v", v) - } -} - -func TestReadAndWriteIni(t *testing.T) { - var tests = []struct { - options IniOptions - read string - write string - }{ - { - IniIncludeComments, - `[Application Options] -; Show verbose debug information -verbose = true -verbose = true - -; Test default value -Default = "quote me" - -; Test default array value -DefaultArray = 1 -DefaultArray = "2" -DefaultArray = 3 - -; Testdefault map value -; DefaultMap = - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -[Other Options] -; A slice of strings -; StringSlice = - -; A map from string to int -int-map = a:2 -int-map = b:"3" - -`, - `[Application 
Options] -; Show verbose debug information -verbose = true -verbose = true - -; Test default value -Default = "quote me" - -; Test default array value -DefaultArray = 1 -DefaultArray = 2 -DefaultArray = 3 - -; Testdefault map value -; DefaultMap = - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -[Other Options] -; A slice of strings -; StringSlice = - -; A map from string to int -int-map = a:2 -int-map = b:3 - -`, - }, - { - IniIncludeComments, - `[Application Options] -; Show verbose debug information -verbose = true -verbose = true - -; Test default value -Default = "quote me" - -; Test default array value -DefaultArray = "1" -DefaultArray = "2" -DefaultArray = "3" - -; Testdefault map value -; DefaultMap = - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -[Other Options] -; A slice of strings -; StringSlice = - -; A map from string to int -int-map = a:"2" -int-map = b:"3" - -`, - `[Application Options] -; Show verbose debug information -verbose = true -verbose = true - -; Test default value -Default = "quote me" - -; Test default array value -DefaultArray = "1" -DefaultArray = "2" -DefaultArray = "3" - -; Testdefault map value -; DefaultMap = - -; Test env-default1 value -EnvDefault1 = env-def - -; Test env-default2 value -EnvDefault2 = env-def - -[Other Options] -; A slice of strings -; StringSlice = - -; A map from string to int -int-map = a:"2" -int-map = b:"3" - -`, - }, - } - - for _, test := range tests { - var opts helpOptions - - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - inip := NewIniParser(p) - - read := strings.NewReader(test.read) - err := inip.Parse(read) - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - var write bytes.Buffer - inip.Write(&write, test.options) - - got := write.String() - - msg := fmt.Sprintf("with ini options %b", test.options) - assertDiff(t, got, test.write, msg) - } -} - -func TestReadIniWrongQuoting(t *testing.T) { - var tests = []struct { - iniFile string - lineNumber uint - }{ - { - iniFile: `Default = "New\nvalue`, - lineNumber: 1, - }, - { - iniFile: `StringSlice = "New\nvalue`, - lineNumber: 1, - }, - { - iniFile: `StringSlice = "New\nvalue" - StringSlice = "Second\nvalue`, - lineNumber: 2, - }, - { - iniFile: `DefaultMap = some:"value`, - lineNumber: 1, - }, - { - iniFile: `DefaultMap = some:value - DefaultMap = another:"value`, - lineNumber: 2, - }, - } - - for _, test := range tests { - var opts helpOptions - - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - inip := NewIniParser(p) - - inic := test.iniFile - - b := strings.NewReader(inic) - err := inip.Parse(b) - - if err == nil { - t.Fatalf("Expect error") - } - - iniError := err.(*IniError) - - if iniError.LineNumber != test.lineNumber { - t.Fatalf("Expect error on line %d", test.lineNumber) - } - } -} - -func TestIniCommands(t *testing.T) { - var opts struct { - Value string `short:"v" long:"value"` - - Add struct { - Name int `short:"n" long:"name" ini-name:"AliasName"` - - Other struct { - O string `short:"o" long:"other"` - } `group:"Other Options"` - } `command:"add"` - } - - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - inip := NewIniParser(p) - - inic := `[Application Options] -value = some value - -[add] -AliasName = 5 - -[add.Other Options] -other = 
subgroup - -` - - b := strings.NewReader(inic) - err := inip.Parse(b) - - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - assertString(t, opts.Value, "some value") - - if opts.Add.Name != 5 { - t.Errorf("Expected opts.Add.Name to be 5, but got %v", opts.Add.Name) - } - - assertString(t, opts.Add.Other.O, "subgroup") - - // Test writing it back - buf := &bytes.Buffer{} - - inip.Write(buf, IniDefault) - - assertDiff(t, buf.String(), inic, "ini contents") -} - -func TestIniNoIni(t *testing.T) { - var opts struct { - NoValue string `short:"n" long:"novalue" no-ini:"yes"` - Value string `short:"v" long:"value"` - } - - p := NewNamedParser("TestIni", Default) - p.AddGroup("Application Options", "The application options", &opts) - - inip := NewIniParser(p) - - // read INI - inic := `[Application Options] -novalue = some value -value = some other value -` - - b := strings.NewReader(inic) - err := inip.Parse(b) - - if err == nil { - t.Fatalf("Expected error") - } - - iniError := err.(*IniError) - - if v := uint(2); iniError.LineNumber != v { - t.Errorf("Expected opts.Add.Name to be %d, but got %d", v, iniError.LineNumber) - } - - if v := "unknown option: novalue"; iniError.Message != v { - t.Errorf("Expected opts.Add.Name to be %s, but got %s", v, iniError.Message) - } - - // write INI - opts.NoValue = "some value" - opts.Value = "some other value" - - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Cannot create temporary file: %s", err) - } - defer os.Remove(file.Name()) - - err = inip.WriteFile(file.Name(), IniIncludeDefaults) - if err != nil { - t.Fatalf("Could not write ini file: %s", err) - } - - found, err := ioutil.ReadFile(file.Name()) - if err != nil { - t.Fatalf("Could not read written ini file: %s", err) - } - - expected := "[Application Options]\nValue = some other value\n\n" - - assertDiff(t, string(found), expected, "ini content") -} - -func TestIniParse(t *testing.T) { - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Cannot create temporary file: %s", err) - } - defer os.Remove(file.Name()) - - _, err = file.WriteString("value = 123") - if err != nil { - t.Fatalf("Cannot write to temporary file: %s", err) - } - - file.Close() - - var opts struct { - Value int `long:"value"` - } - - err = IniParse(file.Name(), &opts) - if err != nil { - t.Fatalf("Could not parse ini: %s", err) - } - - if opts.Value != 123 { - t.Fatalf("Expected Value to be \"123\" but was \"%d\"", opts.Value) - } -} - -func TestIniCliOverrides(t *testing.T) { - file, err := ioutil.TempFile("", "") - - if err != nil { - t.Fatalf("Cannot create temporary file: %s", err) - } - - defer os.Remove(file.Name()) - - _, err = file.WriteString("values = 123\n") - _, err = file.WriteString("values = 456\n") - - if err != nil { - t.Fatalf("Cannot write to temporary file: %s", err) - } - - file.Close() - - var opts struct { - Values []int `long:"values"` - } - - p := NewParser(&opts, Default) - err = NewIniParser(p).ParseFile(file.Name()) - - if err != nil { - t.Fatalf("Could not parse ini: %s", err) - } - - _, err = p.ParseArgs([]string{"--values", "111", "--values", "222"}) - - if err != nil { - t.Fatalf("Failed to parse arguments: %s", err) - } - - if len(opts.Values) != 2 { - t.Fatalf("Expected Values to contain two elements, but got %d", len(opts.Values)) - } - - if opts.Values[0] != 111 { - t.Fatalf("Expected Values[0] to be 111, but got '%d'", opts.Values[0]) - } - - if opts.Values[1] != 222 { - t.Fatalf("Expected Values[1] to be 222, but got '%d'", opts.Values[1]) - } 
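The removed TestIniCliOverrides and TestIniParse above exercise the precedence between INI-file defaults and command-line flags. A minimal usage sketch of that behaviour follows; it is not part of the vendored sources, and the struct, its tags, and the "app.ini" file name are illustrative assumptions only.

package main

import (
	"fmt"
	"log"

	flags "github.com/jessevdk/go-flags"
)

type appOptions struct {
	Values []int  `long:"values"`
	Name   string `long:"name" default:"anon" no-ini:"true"` // never read from or written to an INI file
}

func main() {
	var opts appOptions

	p := flags.NewParser(&opts, flags.Default)

	// Apply INI values first; flags parsed afterwards take precedence,
	// which is what TestIniCliOverrides asserts.
	if err := flags.NewIniParser(p).ParseFile("app.ini"); err != nil {
		log.Printf("no ini defaults: %v", err)
	}

	if _, err := p.Parse(); err != nil {
		log.Fatal(err)
	}

	fmt.Println(opts.Values, opts.Name)
}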
-} - -func TestIniOverrides(t *testing.T) { - file, err := ioutil.TempFile("", "") - - if err != nil { - t.Fatalf("Cannot create temporary file: %s", err) - } - - defer os.Remove(file.Name()) - - _, err = file.WriteString("value-with-default = \"ini-value\"\n") - _, err = file.WriteString("value-with-default-override-cli = \"ini-value\"\n") - - if err != nil { - t.Fatalf("Cannot write to temporary file: %s", err) - } - - file.Close() - - var opts struct { - ValueWithDefault string `long:"value-with-default" default:"value"` - ValueWithDefaultOverrideCli string `long:"value-with-default-override-cli" default:"value"` - } - - p := NewParser(&opts, Default) - err = NewIniParser(p).ParseFile(file.Name()) - - if err != nil { - t.Fatalf("Could not parse ini: %s", err) - } - - _, err = p.ParseArgs([]string{"--value-with-default-override-cli", "cli-value"}) - - if err != nil { - t.Fatalf("Failed to parse arguments: %s", err) - } - - assertString(t, opts.ValueWithDefault, "ini-value") - assertString(t, opts.ValueWithDefaultOverrideCli, "cli-value") -} - -func TestIniRequired(t *testing.T) { - var opts struct { - Required string `short:"r" required:"yes" description:"required"` - Config func(s string) error `long:"config" default:"no-ini-file" no-ini:"true"` - } - - p := NewParser(&opts, Default) - - opts.Config = func(s string) error { - inip := NewIniParser(p) - inip.ParseAsDefaults = true - return inip.Parse(strings.NewReader("Required = ini-value\n")) - } - - _, err := p.ParseArgs([]string{"-r", "cli-value"}) - - if err != nil { - t.Fatalf("Failed to parse arguments: %s", err) - } - - assertString(t, opts.Required, "cli-value") -} - -func TestWriteFile(t *testing.T) { - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Cannot create temporary file: %s", err) - } - defer os.Remove(file.Name()) - - var opts struct { - Value int `long:"value"` - } - - opts.Value = 123 - - p := NewParser(&opts, Default) - ini := NewIniParser(p) - - err = ini.WriteFile(file.Name(), IniIncludeDefaults) - if err != nil { - t.Fatalf("Could not write ini file: %s", err) - } - - found, err := ioutil.ReadFile(file.Name()) - if err != nil { - t.Fatalf("Could not read written ini file: %s", err) - } - - expected := "[Application Options]\nValue = 123\n\n" - - assertDiff(t, string(found), expected, "ini content") -} - -func TestOverwriteRequiredOptions(t *testing.T) { - var tests = []struct { - args []string - expected []string - }{ - { - args: []string{"--value", "from CLI"}, - expected: []string{ - "from CLI", - "from default", - }, - }, - { - args: []string{"--value", "from CLI", "--default", "from CLI"}, - expected: []string{ - "from CLI", - "from CLI", - }, - }, - { - args: []string{"--config", "no file name"}, - expected: []string{ - "from INI", - "from INI", - }, - }, - { - args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name"}, - expected: []string{ - "from INI", - "from INI", - }, - }, - { - args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name", "--value", "from CLI after", "--default", "from CLI after"}, - expected: []string{ - "from CLI after", - "from CLI after", - }, - }, - } - - for _, test := range tests { - var opts struct { - Config func(s string) error `long:"config" no-ini:"true"` - Value string `long:"value" required:"true"` - Default string `long:"default" required:"true" default:"from default"` - } - - p := NewParser(&opts, Default) - - opts.Config = func(s string) error { - ini := 
NewIniParser(p) - - return ini.Parse(bytes.NewBufferString("value = from INI\ndefault = from INI")) - } - - _, err := p.ParseArgs(test.args) - if err != nil { - t.Fatalf("Unexpected error %s with args %+v", err, test.args) - } - - if opts.Value != test.expected[0] { - t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected[0], opts.Value, test.args) - } - - if opts.Default != test.expected[1] { - t.Fatalf("Expected Default to be \"%s\" but was \"%s\" with args %+v", test.expected[1], opts.Default, test.args) - } - } -} - -func TestIniOverwriteOptions(t *testing.T) { - var tests = []struct { - args []string - expected string - toggled bool - }{ - { - args: []string{}, - expected: "from default", - }, - { - args: []string{"--value", "from CLI"}, - expected: "from CLI", - }, - { - args: []string{"--config", "no file name"}, - expected: "from INI", - toggled: true, - }, - { - args: []string{"--value", "from CLI before", "--config", "no file name"}, - expected: "from CLI before", - toggled: true, - }, - { - args: []string{"--config", "no file name", "--value", "from CLI after"}, - expected: "from CLI after", - toggled: true, - }, - { - args: []string{"--toggle"}, - toggled: true, - expected: "from default", - }, - } - - for _, test := range tests { - var opts struct { - Config string `long:"config" no-ini:"true"` - Value string `long:"value" default:"from default"` - Toggle bool `long:"toggle"` - } - - p := NewParser(&opts, Default) - - _, err := p.ParseArgs(test.args) - if err != nil { - t.Fatalf("Unexpected error %s with args %+v", err, test.args) - } - - if opts.Config != "" { - inip := NewIniParser(p) - inip.ParseAsDefaults = true - - err = inip.Parse(bytes.NewBufferString("value = from INI\ntoggle = true")) - if err != nil { - t.Fatalf("Unexpected error %s with args %+v", err, test.args) - } - } - - if opts.Value != test.expected { - t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected, opts.Value, test.args) - } - - if opts.Toggle != test.toggled { - t.Fatalf("Expected Toggle to be \"%v\" but was \"%v\" with args %+v", test.toggled, opts.Toggle, test.args) - } - - } -} diff --git a/vendor/github.com/jessevdk/go-flags/long_test.go b/vendor/github.com/jessevdk/go-flags/long_test.go deleted file mode 100644 index 02fc8c7..0000000 --- a/vendor/github.com/jessevdk/go-flags/long_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestLong(t *testing.T) { - var opts = struct { - Value bool `long:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "--value") - - assertStringArray(t, ret, []string{}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } -} - -func TestLongArg(t *testing.T) { - var opts = struct { - Value string `long:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "--value", "value") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestLongArgEqual(t *testing.T) { - var opts = struct { - Value string `long:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "--value=value") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestLongDefault(t *testing.T) { - var opts = struct { - Value string `long:"value" default:"value"` - }{} - - ret := assertParseSuccess(t, &opts) - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestLongOptional(t *testing.T) { - var opts = struct { - Value string `long:"value" optional:"yes" optional-value:"value"` - 
}{} - - ret := assertParseSuccess(t, &opts, "--value") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestLongOptionalArg(t *testing.T) { - var opts = struct { - Value string `long:"value" optional:"yes" optional-value:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "--value", "no") - - assertStringArray(t, ret, []string{"no"}) - assertString(t, opts.Value, "value") -} - -func TestLongOptionalArgEqual(t *testing.T) { - var opts = struct { - Value string `long:"value" optional:"yes" optional-value:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "--value=value", "no") - - assertStringArray(t, ret, []string{"no"}) - assertString(t, opts.Value, "value") -} diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go deleted file mode 100644 index 0cb114e..0000000 --- a/vendor/github.com/jessevdk/go-flags/man.go +++ /dev/null @@ -1,205 +0,0 @@ -package flags - -import ( - "fmt" - "io" - "runtime" - "strings" - "time" -) - -func manQuote(s string) string { - return strings.Replace(s, "\\", "\\\\", -1) -} - -func formatForMan(wr io.Writer, s string) { - for { - idx := strings.IndexRune(s, '`') - - if idx < 0 { - fmt.Fprintf(wr, "%s", manQuote(s)) - break - } - - fmt.Fprintf(wr, "%s", manQuote(s[:idx])) - - s = s[idx+1:] - idx = strings.IndexRune(s, '\'') - - if idx < 0 { - fmt.Fprintf(wr, "%s", manQuote(s)) - break - } - - fmt.Fprintf(wr, "\\fB%s\\fP", manQuote(s[:idx])) - s = s[idx+1:] - } -} - -func writeManPageOptions(wr io.Writer, grp *Group) { - grp.eachGroup(func(group *Group) { - if group.Hidden || len(group.options) == 0 { - return - } - - // If the parent (grp) has any subgroups, display their descriptions as - // subsection headers similar to the output of --help. 
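The removed long-option tests above cover the optional and optional-value tags. A hedged sketch of that behaviour, using a made-up --level flag, is shown here for reference.

package main

import (
	"fmt"
	"log"

	flags "github.com/jessevdk/go-flags"
)

type logOpts struct {
	// "--level" alone yields "info"; "--level=debug" yields "debug".
	// A separate following argument is not consumed as the value, matching
	// TestLongOptionalArg above.
	Level string `long:"level" optional:"yes" optional-value:"info"`
}

func main() {
	var o logOpts

	rest, err := flags.ParseArgs(&o, []string{"--level", "trace.log"})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(o.Level, rest) // info [trace.log]
}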
- if group.ShortDescription != "" && len(grp.groups) > 0 { - fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription) - - if group.LongDescription != "" { - formatForMan(wr, group.LongDescription) - fmt.Fprintln(wr, "") - } - } - - for _, opt := range group.options { - if !opt.canCli() || opt.Hidden { - continue - } - - fmt.Fprintln(wr, ".TP") - fmt.Fprintf(wr, "\\fB") - - if opt.ShortName != 0 { - fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName) - } - - if len(opt.LongName) != 0 { - if opt.ShortName != 0 { - fmt.Fprintf(wr, ", ") - } - - fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace())) - } - - if len(opt.ValueName) != 0 || opt.OptionalArgument { - if opt.OptionalArgument { - fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", "))) - } else { - fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName)) - } - } - - if len(opt.Default) != 0 { - fmt.Fprintf(wr, " ", manQuote(strings.Join(quoteV(opt.Default), ", "))) - } else if len(opt.EnvDefaultKey) != 0 { - if runtime.GOOS == "windows" { - fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey)) - } else { - fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey)) - } - } - - if opt.Required { - fmt.Fprintf(wr, " (\\fIrequired\\fR)") - } - - fmt.Fprintln(wr, "\\fP") - - if len(opt.Description) != 0 { - formatForMan(wr, opt.Description) - fmt.Fprintln(wr, "") - } - } - }) -} - -func writeManPageSubcommands(wr io.Writer, name string, root *Command) { - commands := root.sortedVisibleCommands() - - for _, c := range commands { - var nn string - - if c.Hidden { - continue - } - - if len(name) != 0 { - nn = name + " " + c.Name - } else { - nn = c.Name - } - - writeManPageCommand(wr, nn, root, c) - } -} - -func writeManPageCommand(wr io.Writer, name string, root *Command, command *Command) { - fmt.Fprintf(wr, ".SS %s\n", name) - fmt.Fprintln(wr, command.ShortDescription) - - if len(command.LongDescription) > 0 { - fmt.Fprintln(wr, "") - - cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name)) - - if strings.HasPrefix(command.LongDescription, cmdstart) { - fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name)) - - formatForMan(wr, command.LongDescription[len(cmdstart):]) - fmt.Fprintln(wr, "") - } else { - formatForMan(wr, command.LongDescription) - fmt.Fprintln(wr, "") - } - } - - var usage string - if us, ok := command.data.(Usage); ok { - usage = us.Usage() - } else if command.hasCliOptions() { - usage = fmt.Sprintf("[%s-OPTIONS]", command.Name) - } - - var pre string - if root.hasCliOptions() { - pre = fmt.Sprintf("%s [OPTIONS] %s", root.Name, command.Name) - } else { - pre = fmt.Sprintf("%s %s", root.Name, command.Name) - } - - if len(usage) > 0 { - fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage)) - } - - if len(command.Aliases) > 0 { - fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", "))) - } - - writeManPageOptions(wr, command.Group) - writeManPageSubcommands(wr, name, command) -} - -// WriteManPage writes a basic man page in groff format to the specified -// writer. 
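The man-page helpers above feed the WriteManPage entry point defined just below. A minimal, illustrative sketch of calling it follows; the program name, option, and descriptions are invented for the example.

package main

import (
	"log"
	"os"

	flags "github.com/jessevdk/go-flags"
)

type toolOptions struct {
	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
}

func main() {
	var o toolOptions

	p := flags.NewNamedParser("exampletool", flags.Default)
	if _, err := p.AddGroup("Application Options", "The application options", &o); err != nil {
		log.Fatal(err)
	}

	p.ShortDescription = "an example tool"
	p.LongDescription = "exampletool demonstrates the man page writer."

	// Writes groff output; typically redirected into exampletool.1.
	p.WriteManPage(os.Stdout)
}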
-func (p *Parser) WriteManPage(wr io.Writer) { - t := time.Now() - - fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006")) - fmt.Fprintln(wr, ".SH NAME") - fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuote(p.ShortDescription)) - fmt.Fprintln(wr, ".SH SYNOPSIS") - - usage := p.Usage - - if len(usage) == 0 { - usage = "[OPTIONS]" - } - - fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage)) - fmt.Fprintln(wr, ".SH DESCRIPTION") - - formatForMan(wr, p.LongDescription) - fmt.Fprintln(wr, "") - - fmt.Fprintln(wr, ".SH OPTIONS") - - writeManPageOptions(wr, p.Command.Group) - - if len(p.visibleCommands()) > 0 { - fmt.Fprintln(wr, ".SH COMMANDS") - - writeManPageSubcommands(wr, "", p.Command) - } -} diff --git a/vendor/github.com/jessevdk/go-flags/marshal_test.go b/vendor/github.com/jessevdk/go-flags/marshal_test.go deleted file mode 100644 index 4cfe865..0000000 --- a/vendor/github.com/jessevdk/go-flags/marshal_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package flags - -import ( - "fmt" - "testing" -) - -type marshalled string - -func (m *marshalled) UnmarshalFlag(value string) error { - if value == "yes" { - *m = "true" - } else if value == "no" { - *m = "false" - } else { - return fmt.Errorf("`%s' is not a valid value, please specify `yes' or `no'", value) - } - - return nil -} - -func (m marshalled) MarshalFlag() (string, error) { - if m == "true" { - return "yes", nil - } - - return "no", nil -} - -type marshalledError bool - -func (m marshalledError) MarshalFlag() (string, error) { - return "", newErrorf(ErrMarshal, "Failed to marshal") -} - -func TestUnmarshal(t *testing.T) { - var opts = struct { - Value marshalled `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v=yes") - - assertStringArray(t, ret, []string{}) - - if opts.Value != "true" { - t.Errorf("Expected Value to be \"true\"") - } -} - -func TestUnmarshalDefault(t *testing.T) { - var opts = struct { - Value marshalled `short:"v" default:"yes"` - }{} - - ret := assertParseSuccess(t, &opts) - - assertStringArray(t, ret, []string{}) - - if opts.Value != "true" { - t.Errorf("Expected Value to be \"true\"") - } -} - -func TestUnmarshalOptional(t *testing.T) { - var opts = struct { - Value marshalled `short:"v" optional:"yes" optional-value:"yes"` - }{} - - ret := assertParseSuccess(t, &opts, "-v") - - assertStringArray(t, ret, []string{}) - - if opts.Value != "true" { - t.Errorf("Expected Value to be \"true\"") - } -} - -func TestUnmarshalError(t *testing.T) { - var opts = struct { - Value marshalled `short:"v"` - }{} - - assertParseFail(t, ErrMarshal, fmt.Sprintf("invalid argument for flag `%cv' (expected flags.marshalled): `invalid' is not a valid value, please specify `yes' or `no'", defaultShortOptDelimiter), &opts, "-vinvalid") -} - -func TestUnmarshalPositionalError(t *testing.T) { - var opts = struct { - Args struct { - Value marshalled - } `positional-args:"yes"` - }{} - - parser := NewParser(&opts, Default&^PrintErrors) - _, err := parser.ParseArgs([]string{"invalid"}) - - msg := "`invalid' is not a valid value, please specify `yes' or `no'" - - if err == nil { - assertFatalf(t, "Expected error: %s", msg) - return - } - - if err.Error() != msg { - assertErrorf(t, "Expected error message %#v, but got %#v", msg, err.Error()) - } -} - -func TestMarshalError(t *testing.T) { - var opts = struct { - Value marshalledError `short:"v"` - }{} - - p := NewParser(&opts, Default) - o := p.Command.Groups()[0].Options()[0] - - _, err := convertToString(o.value, o.tag) - - 
assertError(t, err, ErrMarshal, "Failed to marshal") -} diff --git a/vendor/github.com/jessevdk/go-flags/multitag.go b/vendor/github.com/jessevdk/go-flags/multitag.go deleted file mode 100644 index 96bb1a3..0000000 --- a/vendor/github.com/jessevdk/go-flags/multitag.go +++ /dev/null @@ -1,140 +0,0 @@ -package flags - -import ( - "strconv" -) - -type multiTag struct { - value string - cache map[string][]string -} - -func newMultiTag(v string) multiTag { - return multiTag{ - value: v, - } -} - -func (x *multiTag) scan() (map[string][]string, error) { - v := x.value - - ret := make(map[string][]string) - - // This is mostly copied from reflect.StructTag.Get - for v != "" { - i := 0 - - // Skip whitespace - for i < len(v) && v[i] == ' ' { - i++ - } - - v = v[i:] - - if v == "" { - break - } - - // Scan to colon to find key - i = 0 - - for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' { - i++ - } - - if i >= len(v) { - return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value) - } - - if v[i] != ':' { - return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value) - } - - if i+1 >= len(v) { - return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value) - } - - if v[i+1] != '"' { - return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value) - } - - name := v[:i] - v = v[i+1:] - - // Scan quoted string to find value - i = 1 - - for i < len(v) && v[i] != '"' { - if v[i] == '\n' { - return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value) - } - - if v[i] == '\\' { - i++ - } - i++ - } - - if i >= len(v) { - return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value) - } - - val, err := strconv.Unquote(v[:i+1]) - - if err != nil { - return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value) - } - - v = v[i+1:] - - ret[name] = append(ret[name], val) - } - - return ret, nil -} - -func (x *multiTag) Parse() error { - vals, err := x.scan() - x.cache = vals - - return err -} - -func (x *multiTag) cached() map[string][]string { - if x.cache == nil { - cache, _ := x.scan() - - if cache == nil { - cache = make(map[string][]string) - } - - x.cache = cache - } - - return x.cache -} - -func (x *multiTag) Get(key string) string { - c := x.cached() - - if v, ok := c[key]; ok { - return v[len(v)-1] - } - - return "" -} - -func (x *multiTag) GetMany(key string) []string { - c := x.cached() - return c[key] -} - -func (x *multiTag) Set(key string, value string) { - c := x.cached() - c[key] = []string{value} -} - -func (x *multiTag) SetMany(key string, value []string) { - c := x.cached() - c[key] = value -} diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go deleted file mode 100644 index ea09fb4..0000000 --- a/vendor/github.com/jessevdk/go-flags/option.go +++ /dev/null @@ -1,461 +0,0 @@ -package flags - -import ( - "bytes" - "fmt" - "reflect" - "strings" - "syscall" - "unicode/utf8" -) - -// Option flag information. Contains a description of the option, short and -// long name as well as a default value and whether an argument for this -// flag is optional. -type Option struct { - // The description of the option flag. This description is shown - // automatically in the built-in help. - Description string - - // The short name of the option (a single character). 
If not 0, the - // option flag can be 'activated' using -. Either ShortName - // or LongName needs to be non-empty. - ShortName rune - - // The long name of the option. If not "", the option flag can be - // activated using --. Either ShortName or LongName needs - // to be non-empty. - LongName string - - // The default value of the option. - Default []string - - // The optional environment default value key name. - EnvDefaultKey string - - // The optional delimiter string for EnvDefaultKey values. - EnvDefaultDelim string - - // If true, specifies that the argument to an option flag is optional. - // When no argument to the flag is specified on the command line, the - // value of OptionalValue will be set in the field this option represents. - // This is only valid for non-boolean options. - OptionalArgument bool - - // The optional value of the option. The optional value is used when - // the option flag is marked as having an OptionalArgument. This means - // that when the flag is specified, but no option argument is given, - // the value of the field this option represents will be set to - // OptionalValue. This is only valid for non-boolean options. - OptionalValue []string - - // If true, the option _must_ be specified on the command line. If the - // option is not specified, the parser will generate an ErrRequired type - // error. - Required bool - - // A name for the value of an option shown in the Help as --flag [ValueName] - ValueName string - - // A mask value to show in the help instead of the default value. This - // is useful for hiding sensitive information in the help, such as - // passwords. - DefaultMask string - - // If non empty, only a certain set of values is allowed for an option. - Choices []string - - // If true, the option is not displayed in the help or man page - Hidden bool - - // The group which the option belongs to - group *Group - - // The struct field which the option represents. - field reflect.StructField - - // The struct field value which the option represents. - value reflect.Value - - // Determines if the option will be always quoted in the INI output - iniQuote bool - - tag multiTag - isSet bool - isSetDefault bool - preventDefault bool - - defaultLiteral string -} - -// LongNameWithNamespace returns the option's long name with the group namespaces -// prepended by walking up the option's group tree. Namespaces and the long name -// itself are separated by the parser's namespace delimiter. If the long name is -// empty an empty string is returned. -func (option *Option) LongNameWithNamespace() string { - if len(option.LongName) == 0 { - return "" - } - - // fetch the namespace delimiter from the parser which is always at the - // end of the group hierarchy - namespaceDelimiter := "" - g := option.group - - for { - if p, ok := g.parent.(*Parser); ok { - namespaceDelimiter = p.NamespaceDelimiter - - break - } - - switch i := g.parent.(type) { - case *Command: - g = i.Group - case *Group: - g = i - } - } - - // concatenate long name with namespace - longName := option.LongName - g = option.group - - for g != nil { - if g.Namespace != "" { - longName = g.Namespace + namespaceDelimiter + longName - } - - switch i := g.parent.(type) { - case *Command: - g = i.Group - case *Group: - g = i - case *Parser: - g = nil - } - } - - return longName -} - -// String converts an option to a human friendly readable string describing the -// option. 
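LongNameWithNamespace above is what turns group namespaces into dotted flag names. An illustrative sketch of the resulting --db.host flag (my example, not taken from the vendored sources):

package main

import (
	"fmt"
	"log"

	flags "github.com/jessevdk/go-flags"
)

type config struct {
	DB struct {
		Host string `long:"host" default:"localhost"`
	} `group:"Database Options" namespace:"db"`
}

func main() {
	var c config

	// The group namespace plus the parser's default "." delimiter produce --db.host.
	if _, err := flags.ParseArgs(&c, []string{"--db.host", "db.example.invalid"}); err != nil {
		log.Fatal(err)
	}

	fmt.Println(c.DB.Host) // db.example.invalid
}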
-func (option *Option) String() string { - var s string - var short string - - if option.ShortName != 0 { - data := make([]byte, utf8.RuneLen(option.ShortName)) - utf8.EncodeRune(data, option.ShortName) - short = string(data) - - if len(option.LongName) != 0 { - s = fmt.Sprintf("%s%s, %s%s", - string(defaultShortOptDelimiter), short, - defaultLongOptDelimiter, option.LongNameWithNamespace()) - } else { - s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short) - } - } else if len(option.LongName) != 0 { - s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace()) - } - - return s -} - -// Value returns the option value as an interface{}. -func (option *Option) Value() interface{} { - return option.value.Interface() -} - -// Field returns the reflect struct field of the option. -func (option *Option) Field() reflect.StructField { - return option.field -} - -// IsSet returns true if option has been set -func (option *Option) IsSet() bool { - return option.isSet -} - -// IsSetDefault returns true if option has been set via the default option tag -func (option *Option) IsSetDefault() bool { - return option.isSetDefault -} - -// Set the value of an option to the specified value. An error will be returned -// if the specified value could not be converted to the corresponding option -// value type. -func (option *Option) set(value *string) error { - kind := option.value.Type().Kind() - - if (kind == reflect.Map || kind == reflect.Slice) && !option.isSet { - option.empty() - } - - option.isSet = true - option.preventDefault = true - - if len(option.Choices) != 0 { - found := false - - for _, choice := range option.Choices { - if choice == *value { - found = true - break - } - } - - if !found { - allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ") - - if len(option.Choices) > 1 { - allowed += " or " + option.Choices[len(option.Choices)-1] - } - - return newErrorf(ErrInvalidChoice, - "Invalid value `%s' for option `%s'. 
Allowed values are: %s", - *value, option, allowed) - } - } - - if option.isFunc() { - return option.call(value) - } else if value != nil { - return convert(*value, option.value, option.tag) - } - - return convert("", option.value, option.tag) -} - -func (option *Option) canCli() bool { - return option.ShortName != 0 || len(option.LongName) != 0 -} - -func (option *Option) canArgument() bool { - if u := option.isUnmarshaler(); u != nil { - return true - } - - return !option.isBool() -} - -func (option *Option) emptyValue() reflect.Value { - tp := option.value.Type() - - if tp.Kind() == reflect.Map { - return reflect.MakeMap(tp) - } - - return reflect.Zero(tp) -} - -func (option *Option) empty() { - if !option.isFunc() { - option.value.Set(option.emptyValue()) - } -} - -func (option *Option) clearDefault() { - usedDefault := option.Default - - if envKey := option.EnvDefaultKey; envKey != "" { - // os.Getenv() makes no distinction between undefined and - // empty values, so we use syscall.Getenv() - if value, ok := syscall.Getenv(envKey); ok { - if option.EnvDefaultDelim != "" { - usedDefault = strings.Split(value, - option.EnvDefaultDelim) - } else { - usedDefault = []string{value} - } - } - } - - option.isSetDefault = true - - if len(usedDefault) > 0 { - option.empty() - - for _, d := range usedDefault { - option.set(&d) - option.isSetDefault = true - } - } else { - tp := option.value.Type() - - switch tp.Kind() { - case reflect.Map: - if option.value.IsNil() { - option.empty() - } - case reflect.Slice: - if option.value.IsNil() { - option.empty() - } - } - } -} - -func (option *Option) valueIsDefault() bool { - // Check if the value of the option corresponds to its - // default value - emptyval := option.emptyValue() - - checkvalptr := reflect.New(emptyval.Type()) - checkval := reflect.Indirect(checkvalptr) - - checkval.Set(emptyval) - - if len(option.Default) != 0 { - for _, v := range option.Default { - convert(v, checkval, option.tag) - } - } - - return reflect.DeepEqual(option.value.Interface(), checkval.Interface()) -} - -func (option *Option) isUnmarshaler() Unmarshaler { - v := option.value - - for { - if !v.CanInterface() { - break - } - - i := v.Interface() - - if u, ok := i.(Unmarshaler); ok { - return u - } - - if !v.CanAddr() { - break - } - - v = v.Addr() - } - - return nil -} - -func (option *Option) isBool() bool { - tp := option.value.Type() - - for { - switch tp.Kind() { - case reflect.Slice, reflect.Ptr: - tp = tp.Elem() - case reflect.Bool: - return true - case reflect.Func: - return tp.NumIn() == 0 - default: - return false - } - } -} - -func (option *Option) isSignedNumber() bool { - tp := option.value.Type() - - for { - switch tp.Kind() { - case reflect.Slice, reflect.Ptr: - tp = tp.Elem() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64: - return true - default: - return false - } - } -} - -func (option *Option) isFunc() bool { - return option.value.Type().Kind() == reflect.Func -} - -func (option *Option) call(value *string) error { - var retval []reflect.Value - - if value == nil { - retval = option.value.Call(nil) - } else { - tp := option.value.Type().In(0) - - val := reflect.New(tp) - val = reflect.Indirect(val) - - if err := convert(*value, val, option.tag); err != nil { - return err - } - - retval = option.value.Call([]reflect.Value{val}) - } - - if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() { - if retval[0].Interface() == nil { - return nil - } - - return 
retval[0].Interface().(error) - } - - return nil -} - -func (option *Option) updateDefaultLiteral() { - defs := option.Default - def := "" - - if len(defs) == 0 && option.canArgument() { - var showdef bool - - switch option.field.Type.Kind() { - case reflect.Func, reflect.Ptr: - showdef = !option.value.IsNil() - case reflect.Slice, reflect.String, reflect.Array: - showdef = option.value.Len() > 0 - case reflect.Map: - showdef = !option.value.IsNil() && option.value.Len() > 0 - default: - zeroval := reflect.Zero(option.field.Type) - showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface()) - } - - if showdef { - def, _ = convertToString(option.value, option.tag) - } - } else if len(defs) != 0 { - l := len(defs) - 1 - - for i := 0; i < l; i++ { - def += quoteIfNeeded(defs[i]) + ", " - } - - def += quoteIfNeeded(defs[l]) - } - - option.defaultLiteral = def -} - -func (option *Option) shortAndLongName() string { - ret := &bytes.Buffer{} - - if option.ShortName != 0 { - ret.WriteRune(defaultShortOptDelimiter) - ret.WriteRune(option.ShortName) - } - - if len(option.LongName) != 0 { - if option.ShortName != 0 { - ret.WriteRune('/') - } - - ret.WriteString(option.LongName) - } - - return ret.String() -} diff --git a/vendor/github.com/jessevdk/go-flags/options_test.go b/vendor/github.com/jessevdk/go-flags/options_test.go deleted file mode 100644 index b0fe9f4..0000000 --- a/vendor/github.com/jessevdk/go-flags/options_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestPassDoubleDash(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - }{} - - p := NewParser(&opts, PassDoubleDash) - ret, err := p.ParseArgs([]string{"-v", "--", "-v", "-g"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - assertStringArray(t, ret, []string{"-v", "-g"}) -} - -func TestPassAfterNonOption(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - }{} - - p := NewParser(&opts, PassAfterNonOption) - ret, err := p.ParseArgs([]string{"-v", "arg", "-v", "-g"}) - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - return - } - - if !opts.Value { - t.Errorf("Expected Value to be true") - } - - assertStringArray(t, ret, []string{"arg", "-v", "-g"}) -} diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/vendor/github.com/jessevdk/go-flags/optstyle_other.go deleted file mode 100644 index 56dfdae..0000000 --- a/vendor/github.com/jessevdk/go-flags/optstyle_other.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !windows forceposix - -package flags - -import ( - "strings" -) - -const ( - defaultShortOptDelimiter = '-' - defaultLongOptDelimiter = "--" - defaultNameArgDelimiter = '=' -) - -func argumentStartsOption(arg string) bool { - return len(arg) > 0 && arg[0] == '-' -} - -func argumentIsOption(arg string) bool { - if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { - return true - } - - if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { - return true - } - - return false -} - -// stripOptionPrefix returns the option without the prefix and whether or -// not the option is a long option or not. 
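The removed options_test.go above checks the PassDoubleDash and PassAfterNonOption parser options. A short usage sketch of PassDoubleDash, assuming a hypothetical -v flag:

package main

import (
	"fmt"
	"log"

	flags "github.com/jessevdk/go-flags"
)

type runOpts struct {
	Verbose bool `short:"v"`
}

func main() {
	var o runOpts

	p := flags.NewParser(&o, flags.PassDoubleDash)

	// Everything after "--" is returned untouched instead of being parsed.
	rest, err := p.ParseArgs([]string{"-v", "--", "-x", "file.txt"})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(o.Verbose, rest) // true [-x file.txt]
}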
-func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { - if strings.HasPrefix(optname, "--") { - return "--", optname[2:], true - } else if strings.HasPrefix(optname, "-") { - return "-", optname[1:], false - } - - return "", optname, false -} - -// splitOption attempts to split the passed option into a name and an argument. -// When there is no argument specified, nil will be returned for it. -func splitOption(prefix string, option string, islong bool) (string, string, *string) { - pos := strings.Index(option, "=") - - if (islong && pos >= 0) || (!islong && pos == 1) { - rest := option[pos+1:] - return option[:pos], "=", &rest - } - - return option, "", nil -} - -// addHelpGroup adds a new group that contains default help parameters. -func (c *Command) addHelpGroup(showHelp func() error) *Group { - var help struct { - ShowHelp func() error `short:"h" long:"help" description:"Show this help message"` - } - - help.ShowHelp = showHelp - ret, _ := c.AddGroup("Help Options", "", &help) - ret.isBuiltinHelp = true - - return ret -} diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go deleted file mode 100644 index f3f28ae..0000000 --- a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build !forceposix - -package flags - -import ( - "strings" -) - -// Windows uses a front slash for both short and long options. Also it uses -// a colon for name/argument delimter. -const ( - defaultShortOptDelimiter = '/' - defaultLongOptDelimiter = "/" - defaultNameArgDelimiter = ':' -) - -func argumentStartsOption(arg string) bool { - return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/') -} - -func argumentIsOption(arg string) bool { - // Windows-style options allow front slash for the option - // delimiter. - if len(arg) > 1 && arg[0] == '/' { - return true - } - - if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { - return true - } - - if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { - return true - } - - return false -} - -// stripOptionPrefix returns the option without the prefix and whether or -// not the option is a long option or not. -func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { - // Determine if the argument is a long option or not. Windows - // typically supports both long and short options with a single - // front slash as the option delimiter, so handle this situation - // nicely. - possplit := 0 - - if strings.HasPrefix(optname, "--") { - possplit = 2 - islong = true - } else if strings.HasPrefix(optname, "-") { - possplit = 1 - islong = false - } else if strings.HasPrefix(optname, "/") { - possplit = 1 - islong = len(optname) > 2 - } - - return optname[:possplit], optname[possplit:], islong -} - -// splitOption attempts to split the passed option into a name and an argument. -// When there is no argument specified, nil will be returned for it. -func splitOption(prefix string, option string, islong bool) (string, string, *string) { - if len(option) == 0 { - return option, "", nil - } - - // Windows typically uses a colon for the option name and argument - // delimiter while POSIX typically uses an equals. Support both styles, - // but don't allow the two to be mixed. That is to say /foo:bar and - // --foo=bar are acceptable, but /foo=bar and --foo:bar are not. 
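optstyle_other.go and optstyle_windows.go above select the option style per platform through build tags (!windows forceposix versus !forceposix). A hedged sketch of what that means for callers; the --output flag is invented.

// Build with "go build -tags forceposix" to keep POSIX-style flags on Windows.
package main

import (
	"fmt"
	"log"

	flags "github.com/jessevdk/go-flags"
)

type outOpts struct {
	Output string `short:"o" long:"output"`
}

func main() {
	var o outOpts

	// POSIX style, accepted on every platform.
	if _, err := flags.ParseArgs(&o, []string{"--output=report.txt"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(o.Output) // report.txt

	// On a Windows build without forceposix, "/output:report.txt" and "/o:report.txt"
	// reach the same option via the optstyle_windows.go code path above.
}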
- var pos int - var sp string - - if prefix == "/" { - sp = ":" - pos = strings.Index(option, sp) - } else if len(prefix) > 0 { - sp = "=" - pos = strings.Index(option, sp) - } - - if (islong && pos >= 0) || (!islong && pos == 1) { - rest := option[pos+1:] - return option[:pos], sp, &rest - } - - return option, "", nil -} - -// addHelpGroup adds a new group that contains default help parameters. -func (c *Command) addHelpGroup(showHelp func() error) *Group { - // Windows CLI applications typically use /? for help, so make both - // that available as well as the POSIX style h and help. - var help struct { - ShowHelpWindows func() error `short:"?" description:"Show this help message"` - ShowHelpPosix func() error `short:"h" long:"help" description:"Show this help message"` - } - - help.ShowHelpWindows = showHelp - help.ShowHelpPosix = showHelp - - ret, _ := c.AddGroup("Help Options", "", &help) - ret.isBuiltinHelp = true - - return ret -} diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go deleted file mode 100644 index 0a7922a..0000000 --- a/vendor/github.com/jessevdk/go-flags/parser.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -import ( - "bytes" - "fmt" - "os" - "path" - "sort" - "strings" - "unicode/utf8" -) - -// A Parser provides command line option parsing. It can contain several -// option groups each with their own set of options. -type Parser struct { - // Embedded, see Command for more information - *Command - - // A usage string to be displayed in the help message. - Usage string - - // Option flags changing the behavior of the parser. - Options Options - - // NamespaceDelimiter separates group namespaces and option long names - NamespaceDelimiter string - - // UnknownOptionsHandler is a function which gets called when the parser - // encounters an unknown option. The function receives the unknown option - // name, a SplitArgument which specifies its value if set with an argument - // separator, and the remaining command line arguments. - // It should return a new list of remaining arguments to continue parsing, - // or an error to indicate a parse failure. - UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error) - - // CompletionHandler is a function gets called to handle the completion of - // items. By default, the items are printed and the application is exited. - // You can override this default behavior by specifying a custom CompletionHandler. - CompletionHandler func(items []Completion) - - // CommandHandler is a function that gets called to handle execution of a - // command. By default, the command will simply be executed. This can be - // overridden to perform certain actions (such as applying global flags) - // just before the command is executed. Note that if you override the - // handler it is your responsibility to call the command.Execute function. - // - // The command passed into CommandHandler may be nil in case there is no - // command to be executed when parsing has finished. - CommandHandler func(command Commander, args []string) error - - internalError error -} - -// SplitArgument represents the argument value of an option that was passed using -// an argument separator. 
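The Parser fields above include UnknownOptionHandler, whose signature appears in the removed source. A sketch of using it to collect unrecognised flags instead of failing the parse; the --colour flag and the collection slice are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	flags "github.com/jessevdk/go-flags"
)

type baseOpts struct {
	Verbose bool `short:"v" long:"verbose"`
}

func main() {
	var o baseOpts
	var extras []string

	p := flags.NewParser(&o, flags.Default)
	p.UnknownOptionHandler = func(option string, arg flags.SplitArgument, args []string) ([]string, error) {
		if v, ok := arg.Value(); ok {
			extras = append(extras, "--"+option+"="+v)
		} else {
			extras = append(extras, "--"+option)
		}
		// Returning the remaining arguments unchanged lets parsing continue.
		return args, nil
	}

	if _, err := p.ParseArgs([]string{"-v", "--colour=auto"}); err != nil {
		log.Fatal(err)
	}

	fmt.Println(o.Verbose, extras) // true [--colour=auto]
}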
-type SplitArgument interface { - // String returns the option's value as a string, and a boolean indicating - // if the option was present. - Value() (string, bool) -} - -type strArgument struct { - value *string -} - -func (s strArgument) Value() (string, bool) { - if s.value == nil { - return "", false - } - - return *s.value, true -} - -// Options provides parser options that change the behavior of the option -// parser. -type Options uint - -const ( - // None indicates no options. - None Options = 0 - - // HelpFlag adds a default Help Options group to the parser containing - // -h and --help options. When either -h or --help is specified on the - // command line, the parser will return the special error of type - // ErrHelp. When PrintErrors is also specified, then the help message - // will also be automatically printed to os.Stdout. - HelpFlag = 1 << iota - - // PassDoubleDash passes all arguments after a double dash, --, as - // remaining command line arguments (i.e. they will not be parsed for - // flags). - PassDoubleDash - - // IgnoreUnknown ignores any unknown options and passes them as - // remaining command line arguments instead of generating an error. - IgnoreUnknown - - // PrintErrors prints any errors which occurred during parsing to - // os.Stderr. In the special case of ErrHelp, the message will be printed - // to os.Stdout. - PrintErrors - - // PassAfterNonOption passes all arguments after the first non option - // as remaining command line arguments. This is equivalent to strict - // POSIX processing. - PassAfterNonOption - - // Default is a convenient default set of options which should cover - // most of the uses of the flags package. - Default = HelpFlag | PrintErrors | PassDoubleDash -) - -type parseState struct { - arg string - args []string - retargs []string - positional []*Arg - err error - - command *Command - lookup lookup -} - -// Parse is a convenience function to parse command line options with default -// settings. The provided data is a pointer to a struct representing the -// default option group (named "Application Options"). For more control, use -// flags.NewParser. -func Parse(data interface{}) ([]string, error) { - return NewParser(data, Default).Parse() -} - -// ParseArgs is a convenience function to parse command line options with default -// settings. The provided data is a pointer to a struct representing the -// default option group (named "Application Options"). The args argument is -// the list of command line arguments to parse. If you just want to parse the -// default program command line arguments (i.e. os.Args), then use flags.Parse -// instead. For more control, use flags.NewParser. -func ParseArgs(data interface{}, args []string) ([]string, error) { - return NewParser(data, Default).ParseArgs(args) -} - -// NewParser creates a new parser. It uses os.Args[0] as the application -// name and then calls Parser.NewNamedParser (see Parser.NewNamedParser for -// more details). The provided data is a pointer to a struct representing the -// default option group (named "Application Options"), or nil if the default -// group should not be added. The options parameter specifies a set of options -// for the parser. -func NewParser(data interface{}, options Options) *Parser { - p := NewNamedParser(path.Base(os.Args[0]), options) - - if data != nil { - g, err := p.AddGroup("Application Options", "", data) - - if err == nil { - g.parent = p - } - - p.internalError = err - } - - return p -} - -// NewNamedParser creates a new parser. 
The appname is used to display the -// executable name in the built-in help message. Option groups and commands can -// be added to this parser by using AddGroup and AddCommand. -func NewNamedParser(appname string, options Options) *Parser { - p := &Parser{ - Command: newCommand(appname, "", "", nil), - Options: options, - NamespaceDelimiter: ".", - } - - p.Command.parent = p - - return p -} - -// Parse parses the command line arguments from os.Args using Parser.ParseArgs. -// For more detailed information see ParseArgs. -func (p *Parser) Parse() ([]string, error) { - return p.ParseArgs(os.Args[1:]) -} - -// ParseArgs parses the command line arguments according to the option groups that -// were added to the parser. On successful parsing of the arguments, the -// remaining, non-option, arguments (if any) are returned. The returned error -// indicates a parsing error and can be used with PrintError to display -// contextual information on where the error occurred exactly. -// -// When the common help group has been added (AddHelp) and either -h or --help -// was specified in the command line arguments, a help message will be -// automatically printed if the PrintErrors option is enabled. -// Furthermore, the special error type ErrHelp is returned. -// It is up to the caller to exit the program if so desired. -func (p *Parser) ParseArgs(args []string) ([]string, error) { - if p.internalError != nil { - return nil, p.internalError - } - - p.eachOption(func(c *Command, g *Group, option *Option) { - option.isSet = false - option.isSetDefault = false - option.updateDefaultLiteral() - }) - - // Add built-in help group to all commands if necessary - if (p.Options & HelpFlag) != None { - p.addHelpGroups(p.showBuiltinHelp) - } - - compval := os.Getenv("GO_FLAGS_COMPLETION") - - if len(compval) != 0 { - comp := &completion{parser: p} - items := comp.complete(args) - - if p.CompletionHandler != nil { - p.CompletionHandler(items) - } else { - comp.print(items, compval == "verbose") - os.Exit(0) - } - - return nil, nil - } - - s := &parseState{ - args: args, - retargs: make([]string, 0, len(args)), - } - - p.fillParseState(s) - - for !s.eof() { - arg := s.pop() - - // When PassDoubleDash is set and we encounter a --, then - // simply append all the rest as arguments and break out - if (p.Options&PassDoubleDash) != None && arg == "--" { - s.addArgs(s.args...) 
- break - } - - if !argumentIsOption(arg) { - // Note: this also sets s.err, so we can just check for - // nil here and use s.err later - if p.parseNonOption(s) != nil { - break - } - - continue - } - - var err error - - prefix, optname, islong := stripOptionPrefix(arg) - optname, _, argument := splitOption(prefix, optname, islong) - - if islong { - err = p.parseLong(s, optname, argument) - } else { - err = p.parseShort(s, optname, argument) - } - - if err != nil { - ignoreUnknown := (p.Options & IgnoreUnknown) != None - parseErr := wrapError(err) - - if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) { - s.err = parseErr - break - } - - if ignoreUnknown { - s.addArgs(arg) - } else if p.UnknownOptionHandler != nil { - modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args) - - if err != nil { - s.err = err - break - } - - s.args = modifiedArgs - } - } - } - - if s.err == nil { - p.eachOption(func(c *Command, g *Group, option *Option) { - if option.preventDefault { - return - } - - option.clearDefault() - }) - - s.checkRequired(p) - } - - var reterr error - - if s.err != nil { - reterr = s.err - } else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional { - reterr = s.estimateCommand() - } else if cmd, ok := s.command.data.(Commander); ok { - if p.CommandHandler != nil { - reterr = p.CommandHandler(cmd, s.retargs) - } else { - reterr = cmd.Execute(s.retargs) - } - } else if p.CommandHandler != nil { - reterr = p.CommandHandler(nil, s.retargs) - } - - if reterr != nil { - var retargs []string - - if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp { - retargs = append([]string{s.arg}, s.args...) - } else { - retargs = s.args - } - - return retargs, p.printError(reterr) - } - - return s.retargs, nil -} - -func (p *parseState) eof() bool { - return len(p.args) == 0 -} - -func (p *parseState) pop() string { - if p.eof() { - return "" - } - - p.arg = p.args[0] - p.args = p.args[1:] - - return p.arg -} - -func (p *parseState) peek() string { - if p.eof() { - return "" - } - - return p.args[0] -} - -func (p *parseState) checkRequired(parser *Parser) error { - c := parser.Command - - var required []*Option - - for c != nil { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - if !option.isSet && option.Required { - required = append(required, option) - } - } - }) - - c = c.Active - } - - if len(required) == 0 { - if len(p.positional) > 0 { - var reqnames []string - - for _, arg := range p.positional { - argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1 - - if !argRequired { - continue - } - - if arg.isRemaining() { - if arg.value.Len() < arg.Required { - var arguments string - - if arg.Required > 1 { - arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len()) - } else { - arguments = "argument" - } - - reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`") - } else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum { - if arg.RequiredMaximum == 0 { - reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`") - } else { - var arguments string - - if arg.RequiredMaximum > 1 { - arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len()) - } else { - arguments = "argument" - } - - reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`") - } - } - } else { - reqnames = 
append(reqnames, "`"+arg.Name+"`") - } - } - - if len(reqnames) == 0 { - return nil - } - - var msg string - - if len(reqnames) == 1 { - msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0]) - } else { - msg = fmt.Sprintf("the required arguments %s and %s were not provided", - strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1]) - } - - p.err = newError(ErrRequired, msg) - return p.err - } - - return nil - } - - names := make([]string, 0, len(required)) - - for _, k := range required { - names = append(names, "`"+k.String()+"'") - } - - sort.Strings(names) - - var msg string - - if len(names) == 1 { - msg = fmt.Sprintf("the required flag %s was not specified", names[0]) - } else { - msg = fmt.Sprintf("the required flags %s and %s were not specified", - strings.Join(names[:len(names)-1], ", "), names[len(names)-1]) - } - - p.err = newError(ErrRequired, msg) - return p.err -} - -func (p *parseState) estimateCommand() error { - commands := p.command.sortedVisibleCommands() - cmdnames := make([]string, len(commands)) - - for i, v := range commands { - cmdnames[i] = v.Name - } - - var msg string - var errtype ErrorType - - if len(p.retargs) != 0 { - c, l := closestChoice(p.retargs[0], cmdnames) - msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0]) - errtype = ErrUnknownCommand - - if float32(l)/float32(len(c)) < 0.5 { - msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c) - } else if len(cmdnames) == 1 { - msg = fmt.Sprintf("%s. You should use the %s command", - msg, - cmdnames[0]) - } else if len(cmdnames) > 1 { - msg = fmt.Sprintf("%s. Please specify one command of: %s or %s", - msg, - strings.Join(cmdnames[:len(cmdnames)-1], ", "), - cmdnames[len(cmdnames)-1]) - } - } else { - errtype = ErrCommandRequired - - if len(cmdnames) == 1 { - msg = fmt.Sprintf("Please specify the %s command", cmdnames[0]) - } else if len(cmdnames) > 1 { - msg = fmt.Sprintf("Please specify one command of: %s or %s", - strings.Join(cmdnames[:len(cmdnames)-1], ", "), - cmdnames[len(cmdnames)-1]) - } - } - - return newError(errtype, msg) -} - -func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) { - if !option.canArgument() { - if argument != nil { - return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option) - } - - err = option.set(nil) - } else if argument != nil || (canarg && !s.eof()) { - var arg string - - if argument != nil { - arg = *argument - } else { - arg = s.pop() - - if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') { - return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got option `%s'", option, arg) - } else if p.Options&PassDoubleDash != 0 && arg == "--" { - return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option) - } - } - - if option.tag.Get("unquote") != "false" { - arg, err = unquoteIfPossible(arg) - } - - if err == nil { - err = option.set(&arg) - } - } else if option.OptionalArgument { - option.empty() - - for _, v := range option.OptionalValue { - err = option.set(&v) - - if err != nil { - break - } - } - } else { - err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option) - } - - if err != nil { - if _, ok := err.(*Error); !ok { - err = newErrorf(ErrMarshal, "invalid argument for flag `%s' (expected %s): %s", - option, - option.value.Type(), - err.Error()) - } - } - - return err -} - -func (p 
*Parser) parseLong(s *parseState, name string, argument *string) error { - if option := s.lookup.longNames[name]; option != nil { - // Only long options that are required can consume an argument - // from the argument list - canarg := !option.OptionalArgument - - return p.parseOption(s, name, option, canarg, argument) - } - - return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name) -} - -func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) { - c, n := utf8.DecodeRuneInString(optname) - - if n == len(optname) { - return optname, nil - } - - first := string(c) - - if option := s.lookup.shortNames[first]; option != nil && option.canArgument() { - arg := optname[n:] - return first, &arg - } - - return optname, nil -} - -func (p *Parser) parseShort(s *parseState, optname string, argument *string) error { - if argument == nil { - optname, argument = p.splitShortConcatArg(s, optname) - } - - for i, c := range optname { - shortname := string(c) - - if option := s.lookup.shortNames[shortname]; option != nil { - // Only the last short argument can consume an argument from - // the arguments list, and only if it's non optional - canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument - - if err := p.parseOption(s, shortname, option, canarg, argument); err != nil { - return err - } - } else { - return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname) - } - - // Only the first option can have a concatted argument, so just - // clear argument here - argument = nil - } - - return nil -} - -func (p *parseState) addArgs(args ...string) error { - for len(p.positional) > 0 && len(args) > 0 { - arg := p.positional[0] - - if err := convert(args[0], arg.value, arg.tag); err != nil { - p.err = err - return err - } - - if !arg.isRemaining() { - p.positional = p.positional[1:] - } - - args = args[1:] - } - - p.retargs = append(p.retargs, args...) 
- return nil -} - -func (p *Parser) parseNonOption(s *parseState) error { - if len(s.positional) > 0 { - return s.addArgs(s.arg) - } - - if len(s.command.commands) > 0 && len(s.retargs) == 0 { - if cmd := s.lookup.commands[s.arg]; cmd != nil { - s.command.Active = cmd - cmd.fillParseState(s) - - return nil - } else if !s.command.SubcommandsOptional { - s.addArgs(s.arg) - return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg) - } - } - - if (p.Options & PassAfterNonOption) != None { - // If PassAfterNonOption is set then all remaining arguments - // are considered positional - if err := s.addArgs(s.arg); err != nil { - return err - } - - if err := s.addArgs(s.args...); err != nil { - return err - } - - s.args = []string{} - } else { - return s.addArgs(s.arg) - } - - return nil -} - -func (p *Parser) showBuiltinHelp() error { - var b bytes.Buffer - - p.WriteHelp(&b) - return newError(ErrHelp, b.String()) -} - -func (p *Parser) printError(err error) error { - if err != nil && (p.Options&PrintErrors) != None { - flagsErr, ok := err.(*Error) - - if ok && flagsErr.Type == ErrHelp { - fmt.Fprintln(os.Stdout, err) - } else { - fmt.Fprintln(os.Stderr, err) - } - } - - return err -} - -func (p *Parser) clearIsSet() { - p.eachCommand(func(c *Command) { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - option.isSet = false - } - }) - }, true) -} diff --git a/vendor/github.com/jessevdk/go-flags/parser_test.go b/vendor/github.com/jessevdk/go-flags/parser_test.go deleted file mode 100644 index 374f21c..0000000 --- a/vendor/github.com/jessevdk/go-flags/parser_test.go +++ /dev/null @@ -1,612 +0,0 @@ -package flags - -import ( - "fmt" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "testing" - "time" -) - -type defaultOptions struct { - Int int `long:"i"` - IntDefault int `long:"id" default:"1"` - - Float64 float64 `long:"f"` - Float64Default float64 `long:"fd" default:"-3.14"` - - NumericFlag bool `short:"3"` - - String string `long:"str"` - StringDefault string `long:"strd" default:"abc"` - StringNotUnquoted string `long:"strnot" unquote:"false"` - - Time time.Duration `long:"t"` - TimeDefault time.Duration `long:"td" default:"1m"` - - Map map[string]int `long:"m"` - MapDefault map[string]int `long:"md" default:"a:1"` - - Slice []int `long:"s"` - SliceDefault []int `long:"sd" default:"1" default:"2"` -} - -func TestDefaults(t *testing.T) { - var tests = []struct { - msg string - args []string - expected defaultOptions - }{ - { - msg: "no arguments, expecting default values", - args: []string{}, - expected: defaultOptions{ - Int: 0, - IntDefault: 1, - - Float64: 0.0, - Float64Default: -3.14, - - NumericFlag: false, - - String: "", - StringDefault: "abc", - - Time: 0, - TimeDefault: time.Minute, - - Map: map[string]int{}, - MapDefault: map[string]int{"a": 1}, - - Slice: []int{}, - SliceDefault: []int{1, 2}, - }, - }, - { - msg: "non-zero value arguments, expecting overwritten arguments", - args: []string{"--i=3", "--id=3", "--f=-2.71", "--fd=2.71", "-3", "--str=def", "--strd=def", "--t=3ms", "--td=3ms", "--m=c:3", "--md=c:3", "--s=3", "--sd=3"}, - expected: defaultOptions{ - Int: 3, - IntDefault: 3, - - Float64: -2.71, - Float64Default: 2.71, - - NumericFlag: true, - - String: "def", - StringDefault: "def", - - Time: 3 * time.Millisecond, - TimeDefault: 3 * time.Millisecond, - - Map: map[string]int{"c": 3}, - MapDefault: map[string]int{"c": 3}, - - Slice: []int{3}, - SliceDefault: []int{3}, - }, - }, - { - msg: "zero value arguments, expecting overwritten arguments", 
- args: []string{"--i=0", "--id=0", "--f=0", "--fd=0", "--str", "", "--strd=\"\"", "--t=0ms", "--td=0s", "--m=:0", "--md=:0", "--s=0", "--sd=0"}, - expected: defaultOptions{ - Int: 0, - IntDefault: 0, - - Float64: 0, - Float64Default: 0, - - String: "", - StringDefault: "", - - Time: 0, - TimeDefault: 0, - - Map: map[string]int{"": 0}, - MapDefault: map[string]int{"": 0}, - - Slice: []int{0}, - SliceDefault: []int{0}, - }, - }, - } - - for _, test := range tests { - var opts defaultOptions - - _, err := ParseArgs(&opts, test.args) - if err != nil { - t.Fatalf("%s:\nUnexpected error: %v", test.msg, err) - } - - if opts.Slice == nil { - opts.Slice = []int{} - } - - if !reflect.DeepEqual(opts, test.expected) { - t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts) - } - } -} - -func TestNoDefaultsForBools(t *testing.T) { - var opts struct { - DefaultBool bool `short:"d" default:"true"` - } - - if runtime.GOOS == "windows" { - assertParseFail(t, ErrInvalidTag, "boolean flag `/d' may not have default values, they always default to `false' and can only be turned on", &opts) - } else { - assertParseFail(t, ErrInvalidTag, "boolean flag `-d' may not have default values, they always default to `false' and can only be turned on", &opts) - } -} - -func TestUnquoting(t *testing.T) { - var tests = []struct { - arg string - err error - value string - }{ - { - arg: "\"abc", - err: strconv.ErrSyntax, - value: "", - }, - { - arg: "\"\"abc\"", - err: strconv.ErrSyntax, - value: "", - }, - { - arg: "\"abc\"", - err: nil, - value: "abc", - }, - { - arg: "\"\\\"abc\\\"\"", - err: nil, - value: "\"abc\"", - }, - { - arg: "\"\\\"abc\"", - err: nil, - value: "\"abc", - }, - } - - for _, test := range tests { - var opts defaultOptions - - for _, delimiter := range []bool{false, true} { - p := NewParser(&opts, None) - - var err error - if delimiter { - _, err = p.ParseArgs([]string{"--str=" + test.arg, "--strnot=" + test.arg}) - } else { - _, err = p.ParseArgs([]string{"--str", test.arg, "--strnot", test.arg}) - } - - if test.err == nil { - if err != nil { - t.Fatalf("Expected no error but got: %v", err) - } - - if test.value != opts.String { - t.Fatalf("Expected String to be %q but got %q", test.value, opts.String) - } - if q := strconv.Quote(test.value); q != opts.StringNotUnquoted { - t.Fatalf("Expected StringDefault to be %q but got %q", q, opts.StringNotUnquoted) - } - } else { - if err == nil { - t.Fatalf("Expected error") - } else if e, ok := err.(*Error); ok { - if strings.HasPrefix(e.Message, test.err.Error()) { - t.Fatalf("Expected error message to end with %q but got %v", test.err.Error(), e.Message) - } - } - } - } - } -} - -// EnvRestorer keeps a copy of a set of env variables and can restore the env from them -type EnvRestorer struct { - env map[string]string -} - -func (r *EnvRestorer) Restore() { - os.Clearenv() - - for k, v := range r.env { - os.Setenv(k, v) - } -} - -// EnvSnapshot returns a snapshot of the currently set env variables -func EnvSnapshot() *EnvRestorer { - r := EnvRestorer{make(map[string]string)} - - for _, kv := range os.Environ() { - parts := strings.SplitN(kv, "=", 2) - - if len(parts) != 2 { - panic("got a weird env variable: " + kv) - } - - r.env[parts[0]] = parts[1] - } - - return &r -} - -type envDefaultOptions struct { - Int int `long:"i" default:"1" env:"TEST_I"` - Time time.Duration `long:"t" default:"1m" env:"TEST_T"` - Map map[string]int `long:"m" default:"a:1" env:"TEST_M" env-delim:";"` - Slice 
[]int `long:"s" default:"1" default:"2" env:"TEST_S" env-delim:","` -} - -func TestEnvDefaults(t *testing.T) { - var tests = []struct { - msg string - args []string - expected envDefaultOptions - env map[string]string - }{ - { - msg: "no arguments, no env, expecting default values", - args: []string{}, - expected: envDefaultOptions{ - Int: 1, - Time: time.Minute, - Map: map[string]int{"a": 1}, - Slice: []int{1, 2}, - }, - }, - { - msg: "no arguments, env defaults, expecting env default values", - args: []string{}, - expected: envDefaultOptions{ - Int: 2, - Time: 2 * time.Minute, - Map: map[string]int{"a": 2, "b": 3}, - Slice: []int{4, 5, 6}, - }, - env: map[string]string{ - "TEST_I": "2", - "TEST_T": "2m", - "TEST_M": "a:2;b:3", - "TEST_S": "4,5,6", - }, - }, - { - msg: "non-zero value arguments, expecting overwritten arguments", - args: []string{"--i=3", "--t=3ms", "--m=c:3", "--s=3"}, - expected: envDefaultOptions{ - Int: 3, - Time: 3 * time.Millisecond, - Map: map[string]int{"c": 3}, - Slice: []int{3}, - }, - env: map[string]string{ - "TEST_I": "2", - "TEST_T": "2m", - "TEST_M": "a:2;b:3", - "TEST_S": "4,5,6", - }, - }, - { - msg: "zero value arguments, expecting overwritten arguments", - args: []string{"--i=0", "--t=0ms", "--m=:0", "--s=0"}, - expected: envDefaultOptions{ - Int: 0, - Time: 0, - Map: map[string]int{"": 0}, - Slice: []int{0}, - }, - env: map[string]string{ - "TEST_I": "2", - "TEST_T": "2m", - "TEST_M": "a:2;b:3", - "TEST_S": "4,5,6", - }, - }, - } - - oldEnv := EnvSnapshot() - defer oldEnv.Restore() - - for _, test := range tests { - var opts envDefaultOptions - oldEnv.Restore() - for envKey, envValue := range test.env { - os.Setenv(envKey, envValue) - } - _, err := ParseArgs(&opts, test.args) - if err != nil { - t.Fatalf("%s:\nUnexpected error: %v", test.msg, err) - } - - if opts.Slice == nil { - opts.Slice = []int{} - } - - if !reflect.DeepEqual(opts, test.expected) { - t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts) - } - } -} - -func TestOptionAsArgument(t *testing.T) { - var tests = []struct { - args []string - expectError bool - errType ErrorType - errMsg string - rest []string - }{ - { - // short option must not be accepted as argument - args: []string{"--string-slice", "foobar", "--string-slice", "-o"}, - expectError: true, - errType: ErrExpectedArgument, - errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-o'", - }, - { - // long option must not be accepted as argument - args: []string{"--string-slice", "foobar", "--string-slice", "--other-option"}, - expectError: true, - errType: ErrExpectedArgument, - errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `--other-option'", - }, - { - // long option must not be accepted as argument - args: []string{"--string-slice", "--"}, - expectError: true, - errType: ErrExpectedArgument, - errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got double dash `--'", - }, - { - // quoted and appended option should be accepted as argument (even if it looks like an option) - args: []string{"--string-slice", "foobar", "--string-slice=\"--other-option\""}, - }, - { - // Accept any single character arguments including '-' - args: []string{"--string-slice", "-"}, - }, - { - // Do not accept arguments which start with '-' even if the next character is a digit - args: []string{"--string-slice", "-3.14"}, - expectError: true, - 
errType: ErrExpectedArgument, - errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-3.14'", - }, - { - // Do not accept arguments which start with '-' if the next character is not a digit - args: []string{"--string-slice", "-character"}, - expectError: true, - errType: ErrExpectedArgument, - errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-character'", - }, - { - args: []string{"-o", "-", "-"}, - rest: []string{"-", "-"}, - }, - { - // Accept arguments which start with '-' if the next character is a digit, for number options only - args: []string{"--int-slice", "-3"}, - }, - { - // Accept arguments which start with '-' if the next character is a digit, for number options only - args: []string{"--int16", "-3"}, - }, - { - // Accept arguments which start with '-' if the next character is a digit, for number options only - args: []string{"--float32", "-3.2"}, - }, - { - // Accept arguments which start with '-' if the next character is a digit, for number options only - args: []string{"--float32ptr", "-3.2"}, - }, - } - - var opts struct { - StringSlice []string `long:"string-slice"` - IntSlice []int `long:"int-slice"` - Int16 int16 `long:"int16"` - Float32 float32 `long:"float32"` - Float32Ptr *float32 `long:"float32ptr"` - OtherOption bool `long:"other-option" short:"o"` - } - - for _, test := range tests { - if test.expectError { - assertParseFail(t, test.errType, test.errMsg, &opts, test.args...) - } else { - args := assertParseSuccess(t, &opts, test.args...) - - assertStringArray(t, args, test.rest) - } - } -} - -func TestUnknownFlagHandler(t *testing.T) { - - var opts struct { - Flag1 string `long:"flag1"` - Flag2 string `long:"flag2"` - } - - p := NewParser(&opts, None) - - var unknownFlag1 string - var unknownFlag2 bool - var unknownFlag3 string - - // Set up a callback to intercept unknown options during parsing - p.UnknownOptionHandler = func(option string, arg SplitArgument, args []string) ([]string, error) { - if option == "unknownFlag1" { - if argValue, ok := arg.Value(); ok { - unknownFlag1 = argValue - return args, nil - } - // consume a value from remaining args list - unknownFlag1 = args[0] - return args[1:], nil - } else if option == "unknownFlag2" { - // treat this one as a bool switch, don't consume any args - unknownFlag2 = true - return args, nil - } else if option == "unknownFlag3" { - if argValue, ok := arg.Value(); ok { - unknownFlag3 = argValue - return args, nil - } - // consume a value from remaining args list - unknownFlag3 = args[0] - return args[1:], nil - } - - return args, fmt.Errorf("Unknown flag: %v", option) - } - - // Parse args containing some unknown flags, verify that - // our callback can handle all of them - _, err := p.ParseArgs([]string{"--flag1=stuff", "--unknownFlag1", "blah", "--unknownFlag2", "--unknownFlag3=baz", "--flag2=foo"}) - - if err != nil { - assertErrorf(t, "Parser returned unexpected error %v", err) - } - - assertString(t, opts.Flag1, "stuff") - assertString(t, opts.Flag2, "foo") - assertString(t, unknownFlag1, "blah") - assertString(t, unknownFlag3, "baz") - - if !unknownFlag2 { - assertErrorf(t, "Flag should have been set by unknown handler, but had value: %v", unknownFlag2) - } - - // Parse args with unknown flags that callback doesn't handle, verify it returns error - _, err = p.ParseArgs([]string{"--flag1=stuff", "--unknownFlagX", "blah", "--flag2=foo"}) - - if err == nil { - assertErrorf(t, "Parser should have returned error, but 
returned nil") - } -} - -func TestChoices(t *testing.T) { - var opts struct { - Choice string `long:"choose" choice:"v1" choice:"v2"` - } - - assertParseFail(t, ErrInvalidChoice, "Invalid value `invalid' for option `"+defaultLongOptDelimiter+"choose'. Allowed values are: v1 or v2", &opts, "--choose", "invalid") - assertParseSuccess(t, &opts, "--choose", "v2") - assertString(t, opts.Choice, "v2") -} - -func TestEmbedded(t *testing.T) { - type embedded struct { - V bool `short:"v"` - } - var opts struct { - embedded - } - - assertParseSuccess(t, &opts, "-v") - - if !opts.V { - t.Errorf("Expected V to be true") - } -} - -type command struct { -} - -func (c *command) Execute(args []string) error { - return nil -} - -func TestCommandHandlerNoCommand(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - }{} - - parser := NewParser(&opts, Default&^PrintErrors) - - var executedCommand Commander - var executedArgs []string - - executed := false - - parser.CommandHandler = func(command Commander, args []string) error { - executed = true - - executedCommand = command - executedArgs = args - - return nil - } - - _, err := parser.ParseArgs([]string{"arg1", "arg2"}) - - if err != nil { - t.Fatalf("Unexpected parse error: %s", err) - } - - if !executed { - t.Errorf("Expected command handler to be executed") - } - - if executedCommand != nil { - t.Errorf("Did not exect an executed command") - } - - assertStringArray(t, executedArgs, []string{"arg1", "arg2"}) -} - -func TestCommandHandler(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - - Command command `command:"cmd"` - }{} - - parser := NewParser(&opts, Default&^PrintErrors) - - var executedCommand Commander - var executedArgs []string - - executed := false - - parser.CommandHandler = func(command Commander, args []string) error { - executed = true - - executedCommand = command - executedArgs = args - - return nil - } - - _, err := parser.ParseArgs([]string{"cmd", "arg1", "arg2"}) - - if err != nil { - t.Fatalf("Unexpected parse error: %s", err) - } - - if !executed { - t.Errorf("Expected command handler to be executed") - } - - if executedCommand == nil { - t.Errorf("Expected command handler to be executed") - } - - assertStringArray(t, executedArgs, []string{"arg1", "arg2"}) -} diff --git a/vendor/github.com/jessevdk/go-flags/pointer_test.go b/vendor/github.com/jessevdk/go-flags/pointer_test.go deleted file mode 100644 index dc779c7..0000000 --- a/vendor/github.com/jessevdk/go-flags/pointer_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestPointerBool(t *testing.T) { - var opts = struct { - Value *bool `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v") - - assertStringArray(t, ret, []string{}) - - if !*opts.Value { - t.Errorf("Expected Value to be true") - } -} - -func TestPointerString(t *testing.T) { - var opts = struct { - Value *string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "value") - - assertStringArray(t, ret, []string{}) - assertString(t, *opts.Value, "value") -} - -func TestPointerSlice(t *testing.T) { - var opts = struct { - Value *[]string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "value1", "-v", "value2") - - assertStringArray(t, ret, []string{}) - assertStringArray(t, *opts.Value, []string{"value1", "value2"}) -} - -func TestPointerMap(t *testing.T) { - var opts = struct { - Value *map[string]int `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "k1:2", "-v", "k2:-5") - - assertStringArray(t, ret, 
[]string{}) - - if v, ok := (*opts.Value)["k1"]; !ok { - t.Errorf("Expected key \"k1\" to exist") - } else if v != 2 { - t.Errorf("Expected \"k1\" to be 2, but got %#v", v) - } - - if v, ok := (*opts.Value)["k2"]; !ok { - t.Errorf("Expected key \"k2\" to exist") - } else if v != -5 { - t.Errorf("Expected \"k2\" to be -5, but got %#v", v) - } -} - -type marshalledString string - -func (m *marshalledString) UnmarshalFlag(value string) error { - *m = marshalledString(value) - return nil -} - -func (m marshalledString) MarshalFlag() (string, error) { - return string(m), nil -} - -func TestPointerStringMarshalled(t *testing.T) { - var opts = struct { - Value *marshalledString `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "value") - - assertStringArray(t, ret, []string{}) - - if opts.Value == nil { - t.Error("Expected value not to be nil") - return - } - - assertString(t, string(*opts.Value), "value") -} - -type marshalledStruct struct { - Value string -} - -func (m *marshalledStruct) UnmarshalFlag(value string) error { - m.Value = value - return nil -} - -func (m marshalledStruct) MarshalFlag() (string, error) { - return m.Value, nil -} - -func TestPointerStructMarshalled(t *testing.T) { - var opts = struct { - Value *marshalledStruct `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "value") - - assertStringArray(t, ret, []string{}) - - if opts.Value == nil { - t.Error("Expected value not to be nil") - return - } - - assertString(t, opts.Value.Value, "value") -} - -type PointerGroup struct { - Value bool `short:"v"` -} - -func TestPointerGroup(t *testing.T) { - var opts = struct { - Group *PointerGroup `group:"Group Options"` - }{} - - ret := assertParseSuccess(t, &opts, "-v") - - assertStringArray(t, ret, []string{}) - - if !opts.Group.Value { - t.Errorf("Expected Group.Value to be true") - } -} - -func TestDoNotChangeNonTaggedFields(t *testing.T) { - var opts struct { - A struct { - Pointer *int - } - B *struct { - Pointer *int - } - } - - ret := assertParseSuccess(t, &opts) - - assertStringArray(t, ret, []string{}) - - if opts.A.Pointer != nil { - t.Error("Expected A.Pointer to be nil") - } - if opts.B != nil { - t.Error("Expected B to be nil") - } - if opts.B != nil && opts.B.Pointer != nil { - t.Error("Expected B.Pointer to be nil") - } -} diff --git a/vendor/github.com/jessevdk/go-flags/short_test.go b/vendor/github.com/jessevdk/go-flags/short_test.go deleted file mode 100644 index 5f4106b..0000000 --- a/vendor/github.com/jessevdk/go-flags/short_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package flags - -import ( - "fmt" - "testing" -) - -func TestShort(t *testing.T) { - var opts = struct { - Value bool `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v") - - assertStringArray(t, ret, []string{}) - - if !opts.Value { - t.Errorf("Expected Value to be true") - } -} - -func TestShortTooLong(t *testing.T) { - var opts = struct { - Value bool `short:"vv"` - }{} - - assertParseFail(t, ErrShortNameTooLong, "short names can only be 1 character long, not `vv'", &opts) -} - -func TestShortRequired(t *testing.T) { - var opts = struct { - Value bool `short:"v" required:"true"` - }{} - - assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts) -} - -func TestShortRequiredFalsy1(t *testing.T) { - var opts = struct { - Value bool `short:"v" required:"false"` - }{} - - assertParseSuccess(t, &opts) -} - -func TestShortRequiredFalsy2(t *testing.T) { - var opts = struct { - Value bool `short:"v" 
required:"no"` - }{} - - assertParseSuccess(t, &opts) -} - -func TestShortMultiConcat(t *testing.T) { - var opts = struct { - V bool `short:"v"` - O bool `short:"o"` - F bool `short:"f"` - }{} - - ret := assertParseSuccess(t, &opts, "-vo", "-f") - - assertStringArray(t, ret, []string{}) - - if !opts.V { - t.Errorf("Expected V to be true") - } - - if !opts.O { - t.Errorf("Expected O to be true") - } - - if !opts.F { - t.Errorf("Expected F to be true") - } -} - -func TestShortMultiRequiredConcat(t *testing.T) { - var opts = struct { - V bool `short:"v" required:"true"` - O bool `short:"o" required:"true"` - F bool `short:"f" required:"true"` - }{} - - ret := assertParseSuccess(t, &opts, "-vo", "-f") - - assertStringArray(t, ret, []string{}) - - if !opts.V { - t.Errorf("Expected V to be true") - } - - if !opts.O { - t.Errorf("Expected O to be true") - } - - if !opts.F { - t.Errorf("Expected F to be true") - } -} - -func TestShortMultiSlice(t *testing.T) { - var opts = struct { - Values []bool `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "-v") - - assertStringArray(t, ret, []string{}) - assertBoolArray(t, opts.Values, []bool{true, true}) -} - -func TestShortMultiSliceConcat(t *testing.T) { - var opts = struct { - Values []bool `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-vvv") - - assertStringArray(t, ret, []string{}) - assertBoolArray(t, opts.Values, []bool{true, true, true}) -} - -func TestShortWithEqualArg(t *testing.T) { - var opts = struct { - Value string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v=value") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestShortWithArg(t *testing.T) { - var opts = struct { - Value string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-vvalue") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestShortArg(t *testing.T) { - var opts = struct { - Value string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-v", "value") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "value") -} - -func TestShortMultiWithEqualArg(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v"` - }{} - - assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffv=value") -} - -func TestShortMultiArg(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-ffv", "value") - - assertStringArray(t, ret, []string{}) - assertBoolArray(t, opts.F, []bool{true, true}) - assertString(t, opts.Value, "value") -} - -func TestShortMultiArgConcatFail(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v"` - }{} - - assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffvvalue") -} - -func TestShortMultiArgConcat(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v"` - }{} - - ret := assertParseSuccess(t, &opts, "-vff") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "ff") -} - -func TestShortOptional(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v" optional:"yes" optional-value:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "-fv", "f") - - assertStringArray(t, ret, []string{"f"}) - assertString(t, opts.Value, "value") -} - -func 
TestShortOptionalFalsy1(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v" optional:"false" optional-value:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "-fv", "f") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "f") -} - -func TestShortOptionalFalsy2(t *testing.T) { - var opts = struct { - F []bool `short:"f"` - Value string `short:"v" optional:"no" optional-value:"value"` - }{} - - ret := assertParseSuccess(t, &opts, "-fv", "f") - - assertStringArray(t, ret, []string{}) - assertString(t, opts.Value, "f") -} diff --git a/vendor/github.com/jessevdk/go-flags/tag_test.go b/vendor/github.com/jessevdk/go-flags/tag_test.go deleted file mode 100644 index 9daa740..0000000 --- a/vendor/github.com/jessevdk/go-flags/tag_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestTagMissingColon(t *testing.T) { - var opts = struct { - Value bool `short` - }{} - - assertParseFail(t, ErrTag, "expected `:' after key name, but got end of tag (in `short`)", &opts, "") -} - -func TestTagMissingValue(t *testing.T) { - var opts = struct { - Value bool `short:` - }{} - - assertParseFail(t, ErrTag, "expected `\"' to start tag value at end of tag (in `short:`)", &opts, "") -} - -func TestTagMissingQuote(t *testing.T) { - var opts = struct { - Value bool `short:"v` - }{} - - assertParseFail(t, ErrTag, "expected end of tag value `\"' at end of tag (in `short:\"v`)", &opts, "") -} - -func TestTagNewline(t *testing.T) { - var opts = struct { - Value bool `long:"verbose" description:"verbose -something"` - }{} - - assertParseFail(t, ErrTag, "unexpected newline in tag value `description' (in `long:\"verbose\" description:\"verbose\nsomething\"`)", &opts, "") -} diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go deleted file mode 100644 index df97e7e..0000000 --- a/vendor/github.com/jessevdk/go-flags/termsize.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows,!plan9,!solaris - -package flags - -import ( - "syscall" - "unsafe" -) - -type winsize struct { - row, col uint16 - xpixel, ypixel uint16 -} - -func getTerminalColumns() int { - ws := winsize{} - - if tIOCGWINSZ != 0 { - syscall.Syscall(syscall.SYS_IOCTL, - uintptr(0), - uintptr(tIOCGWINSZ), - uintptr(unsafe.Pointer(&ws))) - - return int(ws.col) - } - - return 80 -} diff --git a/vendor/github.com/jessevdk/go-flags/termsize_linux.go b/vendor/github.com/jessevdk/go-flags/termsize_linux.go deleted file mode 100644 index e3975e2..0000000 --- a/vendor/github.com/jessevdk/go-flags/termsize_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build linux - -package flags - -const ( - tIOCGWINSZ = 0x5413 -) diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go deleted file mode 100644 index 2a9bbe0..0000000 --- a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows plan9 solaris - -package flags - -func getTerminalColumns() int { - return 80 -} diff --git a/vendor/github.com/jessevdk/go-flags/termsize_other.go b/vendor/github.com/jessevdk/go-flags/termsize_other.go deleted file mode 100644 index 3082151..0000000 --- a/vendor/github.com/jessevdk/go-flags/termsize_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !darwin,!freebsd,!netbsd,!openbsd,!linux - -package flags - -const ( - tIOCGWINSZ = 0 -) diff --git a/vendor/github.com/jessevdk/go-flags/termsize_unix.go 
b/vendor/github.com/jessevdk/go-flags/termsize_unix.go deleted file mode 100644 index fcc1186..0000000 --- a/vendor/github.com/jessevdk/go-flags/termsize_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build darwin freebsd netbsd openbsd - -package flags - -const ( - tIOCGWINSZ = 0x40087468 -) diff --git a/vendor/github.com/jessevdk/go-flags/unknown_test.go b/vendor/github.com/jessevdk/go-flags/unknown_test.go deleted file mode 100644 index 858be45..0000000 --- a/vendor/github.com/jessevdk/go-flags/unknown_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package flags - -import ( - "testing" -) - -func TestUnknownFlags(t *testing.T) { - var opts = struct { - Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` - }{} - - args := []string{ - "-f", - } - - p := NewParser(&opts, 0) - args, err := p.ParseArgs(args) - - if err == nil { - t.Fatal("Expected error for unknown argument") - } -} - -func TestIgnoreUnknownFlags(t *testing.T) { - var opts = struct { - Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` - }{} - - args := []string{ - "hello", - "world", - "-v", - "--foo=bar", - "--verbose", - "-f", - } - - p := NewParser(&opts, IgnoreUnknown) - args, err := p.ParseArgs(args) - - if err != nil { - t.Fatal(err) - } - - exargs := []string{ - "hello", - "world", - "--foo=bar", - "-f", - } - - issame := (len(args) == len(exargs)) - - if issame { - for i := 0; i < len(args); i++ { - if args[i] != exargs[i] { - issame = false - break - } - } - } - - if !issame { - t.Fatalf("Expected %v but got %v", exargs, args) - } -} diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore deleted file mode 100644 index 529841c..0000000 --- a/vendor/github.com/jmoiron/sqlx/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -tags -environ diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE deleted file mode 100644 index 0d31edf..0000000 --- a/vendor/github.com/jmoiron/sqlx/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ - Copyright (c) 2013, Jason Moiron - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md deleted file mode 100644 index d2d1258..0000000 --- a/vendor/github.com/jmoiron/sqlx/README.md +++ /dev/null @@ -1,185 +0,0 @@ -# sqlx - -[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) - -sqlx is a library which provides a set of extensions on go's standard -`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, -et al. all leave the underlying interfaces untouched, so that their interfaces -are a superset on the standard ones. This makes it relatively painless to -integrate existing codebases using database/sql with sqlx. - -Major additional concepts are: - -* Marshal rows into structs (with embedded struct support), maps, and slices -* Named parameter support including prepared statements -* `Get` and `Select` to go quickly from query to struct/slice - -In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx), -there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that -explains how to use `database/sql` along with sqlx. - -## Recent Changes - -* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions. - -This breaks backwards compatibility, but it's in a way that is trivially fixable -(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in -active development currently. - -* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905). - -### Backwards Compatibility - -There is no Go1-like promise of absolute stability, but I take the issue seriously -and will maintain the library in a compatible state unless vital bugs prevent me -from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and -[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior, -a wider API cleanup was done at the time of fixing. It's possible this will happen -in future; if it does, a git tag will be provided for users requiring the old -behavior to continue to use it until such a time as they can migrate. - -## install - - go get github.com/jmoiron/sqlx - -## issues - -Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of -`Columns()` does not fully qualify column names in queries like: - -```sql -SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id; -``` - -making a struct or map destination ambiguous. Use `AS` in your queries -to give columns distinct names, `rows.Scan` to scan them manually, or -`SliceScan` to get a slice of results. - -## usage - -Below is an example which shows some common use cases for sqlx. Check -[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more -usage. 
- - -```go -package main - -import ( - "database/sql" - "fmt" - "log" - - _ "github.com/lib/pq" - "github.com/jmoiron/sqlx" -) - -var schema = ` -CREATE TABLE person ( - first_name text, - last_name text, - email text -); - -CREATE TABLE place ( - country text, - city text NULL, - telcode integer -)` - -type Person struct { - FirstName string `db:"first_name"` - LastName string `db:"last_name"` - Email string -} - -type Place struct { - Country string - City sql.NullString - TelCode int -} - -func main() { - // this Pings the database trying to connect, panics on error - // use sqlx.Open() for sql.Open() semantics - db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") - if err != nil { - log.Fatalln(err) - } - - // exec the schema or fail; multi-statement Exec behavior varies between - // database drivers; pq will exec them all, sqlite3 won't, ymmv - db.MustExec(schema) - - tx := db.MustBegin() - tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") - tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") - tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") - tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") - tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") - // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person - tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) - tx.Commit() - - // Query the database, storing results in a []Person (wrapped in []interface{}) - people := []Person{} - db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") - jason, john := people[0], people[1] - - fmt.Printf("%#v\n%#v", jason, john) - // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} - // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} - - // You can also get a single result, a la QueryRow - jason = Person{} - err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") - fmt.Printf("%#v\n", jason) - // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} - - // if you have null fields and use SELECT *, you must use sql.Null* in your struct - places := []Place{} - err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - fmt.Println(err) - return - } - usa, singsing, honkers := places[0], places[1], places[2] - - fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) - // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} - // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} - // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} - - // Loop through rows using only one struct - place := Place{} - rows, err := db.Queryx("SELECT * FROM place") - for rows.Next() { - err := rows.StructScan(&place) - if err != nil { - log.Fatalln(err) - } - fmt.Printf("%#v\n", place) - } - // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} - // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} - // 
Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} - - // Named queries, using `:name` as the bindvar. Automatic bindvar support - // which takes into account the dbtype based on the driverName on sqlx.Open/Connect - _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, - map[string]interface{}{ - "first": "Bin", - "last": "Smuth", - "email": "bensmith@allblacks.nz", - }) - - // Selects Mr. Smith from the database - rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) - - // Named queries can also use structs. Their bind names follow the same rules - // as the name -> db mapping, so struct fields are lowercased and the `db` tag - // is taken into consideration. - rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) -} -``` - diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go deleted file mode 100644 index 10f7bdf..0000000 --- a/vendor/github.com/jmoiron/sqlx/bind.go +++ /dev/null @@ -1,207 +0,0 @@ -package sqlx - -import ( - "bytes" - "errors" - "reflect" - "strconv" - "strings" - - "github.com/jmoiron/sqlx/reflectx" -) - -// Bindvar types supported by Rebind, BindMap and BindStruct. -const ( - UNKNOWN = iota - QUESTION - DOLLAR - NAMED -) - -// BindType returns the bindtype for a given database given a drivername. -func BindType(driverName string) int { - switch driverName { - case "postgres", "pgx": - return DOLLAR - case "mysql": - return QUESTION - case "sqlite3": - return QUESTION - case "oci8", "ora", "goracle": - return NAMED - } - return UNKNOWN -} - -// FIXME: this should be able to be tolerant of escaped ?'s in queries without -// losing much speed, and should be to avoid confusion. - -// Rebind a query from the default bindtype (QUESTION) to the target bindtype. -func Rebind(bindType int, query string) string { - switch bindType { - case QUESTION, UNKNOWN: - return query - } - - // Add space enough for 10 params before we have to allocate - rqb := make([]byte, 0, len(query)+10) - - var i, j int - - for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { - rqb = append(rqb, query[:i]...) - - switch bindType { - case DOLLAR: - rqb = append(rqb, '$') - case NAMED: - rqb = append(rqb, ':', 'a', 'r', 'g') - } - - j++ - rqb = strconv.AppendInt(rqb, int64(j), 10) - - query = query[i+1:] - } - - return string(append(rqb, query...)) -} - -// Experimental implementation of Rebind which uses a bytes.Buffer. The code is -// much simpler and should be more resistant to odd unicode, but it is twice as -// slow. Kept here for benchmarking purposes and to possibly replace Rebind if -// problems arise with its somewhat naive handling of unicode. -func rebindBuff(bindType int, query string) string { - if bindType != DOLLAR { - return query - } - - b := make([]byte, 0, len(query)) - rqb := bytes.NewBuffer(b) - j := 1 - for _, r := range query { - if r == '?' { - rqb.WriteRune('$') - rqb.WriteString(strconv.Itoa(j)) - j++ - } else { - rqb.WriteRune(r) - } - } - - return rqb.String() -} - -// In expands slice values in args, returning the modified query string -// and a new arg list that can be executed by a database. The `query` should -// use the `?` bindVar. The return value uses the `?` bindVar. 
-func In(query string, args ...interface{}) (string, []interface{}, error) { - // argMeta stores reflect.Value and length for slices and - // the value itself for non-slice arguments - type argMeta struct { - v reflect.Value - i interface{} - length int - } - - var flatArgsCount int - var anySlices bool - - meta := make([]argMeta, len(args)) - - for i, arg := range args { - v := reflect.ValueOf(arg) - t := reflectx.Deref(v.Type()) - - if t.Kind() == reflect.Slice { - meta[i].length = v.Len() - meta[i].v = v - - anySlices = true - flatArgsCount += meta[i].length - - if meta[i].length == 0 { - return "", nil, errors.New("empty slice passed to 'in' query") - } - } else { - meta[i].i = arg - flatArgsCount++ - } - } - - // don't do any parsing if there aren't any slices; note that this means - // some errors that we might have caught below will not be returned. - if !anySlices { - return query, args, nil - } - - newArgs := make([]interface{}, 0, flatArgsCount) - buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount)) - - var arg, offset int - - for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { - if arg >= len(meta) { - // if an argument wasn't passed, lets return an error; this is - // not actually how database/sql Exec/Query works, but since we are - // creating an argument list programmatically, we want to be able - // to catch these programmer errors earlier. - return "", nil, errors.New("number of bindVars exceeds arguments") - } - - argMeta := meta[arg] - arg++ - - // not a slice, continue. - // our questionmark will either be written before the next expansion - // of a slice or after the loop when writing the rest of the query - if argMeta.length == 0 { - offset = offset + i + 1 - newArgs = append(newArgs, argMeta.i) - continue - } - - // write everything up to and including our ? character - buf.WriteString(query[:offset+i+1]) - - for si := 1; si < argMeta.length; si++ { - buf.WriteString(", ?") - } - - newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) - - // slice the query and reset the offset. this avoids some bookkeeping for - // the write after the loop - query = query[offset+i+1:] - offset = 0 - } - - buf.WriteString(query) - - if arg < len(meta) { - return "", nil, errors.New("number of bindVars less than number arguments") - } - - return buf.String(), newArgs, nil -} - -func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { - switch val := v.Interface().(type) { - case []interface{}: - args = append(args, val...) - case []int: - for i := range val { - args = append(args, val[i]) - } - case []string: - for i := range val { - args = append(args, val[i]) - } - default: - for si := 0; si < vlen; si++ { - args = append(args, v.Index(si).Interface()) - } - } - - return args -} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go deleted file mode 100644 index e2b4e60..0000000 --- a/vendor/github.com/jmoiron/sqlx/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package sqlx provides general purpose extensions to database/sql. -// -// It is intended to seamlessly wrap database/sql and provide convenience -// methods which are useful in the development of database driven applications. -// None of the underlying database/sql methods are changed. Instead all extended -// behavior is implemented through new methods defined on wrapper types. 
-// -// Additions include scanning into structs, named query support, rebinding -// queries for different drivers, convenient shorthands for common error handling -// and more. -// -package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go deleted file mode 100644 index 69eb954..0000000 --- a/vendor/github.com/jmoiron/sqlx/named.go +++ /dev/null @@ -1,346 +0,0 @@ -package sqlx - -// Named Query Support -// -// * BindMap - bind query bindvars to map/struct args -// * NamedExec, NamedQuery - named query w/ struct or map -// * NamedStmt - a pre-compiled named query which is a prepared statement -// -// Internal Interfaces: -// -// * compileNamedQuery - rebind a named query, returning a query and list of names -// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist -// -import ( - "database/sql" - "errors" - "fmt" - "reflect" - "strconv" - "unicode" - - "github.com/jmoiron/sqlx/reflectx" -) - -// NamedStmt is a prepared statement that executes named queries. Prepare it -// how you would execute a NamedQuery, but pass in a struct or map when executing. -type NamedStmt struct { - Params []string - QueryString string - Stmt *Stmt -} - -// Close closes the named statement. -func (n *NamedStmt) Close() error { - return n.Stmt.Close() -} - -// Exec executes a named statement using the struct passed. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return *new(sql.Result), err - } - return n.Stmt.Exec(args...) -} - -// Query executes a named statement using the struct argument, returning rows. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return nil, err - } - return n.Stmt.Query(args...) -} - -// QueryRow executes a named statement against the database. Because sqlx cannot -// create a *sql.Row with an error condition pre-set for binding errors, sqlx -// returns a *sqlx.Row instead. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRow(arg interface{}) *Row { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return &Row{err: err} - } - return n.Stmt.QueryRowx(args...) -} - -// MustExec execs a NamedStmt, panicing on error -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) MustExec(arg interface{}) sql.Result { - res, err := n.Exec(arg) - if err != nil { - panic(err) - } - return res -} - -// Queryx using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { - r, err := n.Query(arg) - if err != nil { - return nil, err - } - return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err -} - -// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is -// an alias for QueryRow. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRowx(arg interface{}) *Row { - return n.QueryRow(arg) -} - -// Select using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. 
-func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { - rows, err := n.Queryx(arg) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// Get using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { - r := n.QueryRowx(arg) - return r.scanAny(dest, false) -} - -// Unsafe creates an unsafe version of the NamedStmt -func (n *NamedStmt) Unsafe() *NamedStmt { - r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} - r.Stmt.unsafe = true - return r -} - -// A union interface of preparer and binder, required to be able to prepare -// named statements (as the bindtype must be determined). -type namedPreparer interface { - Preparer - binder -} - -func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { - bindType := BindType(p.DriverName()) - q, args, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return nil, err - } - stmt, err := Preparex(p, q) - if err != nil { - return nil, err - } - return &NamedStmt{ - QueryString: q, - Params: args, - Stmt: stmt, - }, nil -} - -func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { - if maparg, ok := arg.(map[string]interface{}); ok { - return bindMapArgs(names, maparg) - } - return bindArgs(names, arg, m) -} - -// private interface to generate a list of interfaces from a given struct -// type, given a list of names to pull out of the struct. Used by public -// BindStruct interface. -func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { - arglist := make([]interface{}, 0, len(names)) - - // grab the indirected value of arg - v := reflect.ValueOf(arg) - for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { - v = v.Elem() - } - - err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { - if len(t) == 0 { - return fmt.Errorf("could not find name %s in %#v", names[i], arg) - } - - val := reflectx.FieldByIndexesReadOnly(v, t) - arglist = append(arglist, val.Interface()) - - return nil - }) - - return arglist, err -} - -// like bindArgs, but for maps. -func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { - arglist := make([]interface{}, 0, len(names)) - - for _, name := range names { - val, ok := arg[name] - if !ok { - return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) - } - arglist = append(arglist, val) - } - return arglist, nil -} - -// bindStruct binds a named parameter query with fields from a struct argument. -// The rules for binding field names to parameter names follow the same -// conventions as for StructScan, including obeying the `db` struct tags. -func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { - bound, names, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return "", []interface{}{}, err - } - - arglist, err := bindArgs(names, arg, m) - if err != nil { - return "", []interface{}{}, err - } - - return bound, arglist, nil -} - -// bindMap binds a named parameter query with a map of arguments. 
-func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) { - bound, names, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return "", []interface{}{}, err - } - - arglist, err := bindMapArgs(names, args) - return bound, arglist, err -} - -// -- Compilation of Named Queries - -// Allow digits and letters in bind params; additionally runes are -// checked against underscores, meaning that bind params can have be -// alphanumeric with underscores. Mind the difference between unicode -// digits and numbers, where '5' is a digit but '五' is not. -var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit} - -// FIXME: this function isn't safe for unicode named params, as a failing test -// can testify. This is not a regression but a failure of the original code -// as well. It should be modified to range over runes in a string rather than -// bytes, even though this is less convenient and slower. Hopefully the -// addition of the prepared NamedStmt (which will only do this once) will make -// up for the slightly slower ad-hoc NamedExec/NamedQuery. - -// compile a NamedQuery into an unbound query (using the '?' bindvar) and -// a list of names. -func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { - names = make([]string, 0, 10) - rebound := make([]byte, 0, len(qs)) - - inName := false - last := len(qs) - 1 - currentVar := 1 - name := make([]byte, 0, 10) - - for i, b := range qs { - // a ':' while we're in a name is an error - if b == ':' { - // if this is the second ':' in a '::' escape sequence, append a ':' - if inName && i > 0 && qs[i-1] == ':' { - rebound = append(rebound, ':') - inName = false - continue - } else if inName { - err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) - return query, names, err - } - inName = true - name = []byte{} - // if we're in a name, and this is an allowed character, continue - } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { - // append the byte to the name if we are in a name and not on the last byte - name = append(name, b) - // if we're in a name and it's not an allowed character, the name is done - } else if inName { - inName = false - // if this is the final byte of the string and it is part of the name, then - // make sure to add it to the name - if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { - name = append(name, b) - } - // add the string representation to the names list - names = append(names, string(name)) - // add a proper bindvar for the bindType - switch bindType { - // oracle only supports named type bind vars even for positional - case NAMED: - rebound = append(rebound, ':') - rebound = append(rebound, name...) - case QUESTION, UNKNOWN: - rebound = append(rebound, '?') - case DOLLAR: - rebound = append(rebound, '$') - for _, b := range strconv.Itoa(currentVar) { - rebound = append(rebound, byte(b)) - } - currentVar++ - } - // add this byte to string unless it was not part of the name - if i != last { - rebound = append(rebound, b) - } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { - rebound = append(rebound, b) - } - } else { - // this is a normal byte and should just go onto the rebound query - rebound = append(rebound, b) - } - } - - return string(rebound), names, err -} - -// BindNamed binds a struct or a map to a query with named parameters. 
-// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. -func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(bindType, query, arg, mapper()) -} - -// Named takes a query using named parameters and an argument and -// returns a new query with a list of args that can be executed by -// a database. The return value uses the `?` bindvar. -func Named(query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(QUESTION, query, arg, mapper()) -} - -func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { - if maparg, ok := arg.(map[string]interface{}); ok { - return bindMap(bindType, query, maparg) - } - return bindStruct(bindType, query, arg, m) -} - -// NamedQuery binds a named query and then runs Query on the result using the -// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with -// map[string]interface{} types. -func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.Queryx(q, args...) -} - -// NamedExec uses BindStruct to get a query executable by the driver and -// then runs Exec on the result. Returns an error from the binding -// or the query excution itself. -func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.Exec(q, args...) -} diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go deleted file mode 100644 index 9405007..0000000 --- a/vendor/github.com/jmoiron/sqlx/named_context.go +++ /dev/null @@ -1,132 +0,0 @@ -// +build go1.8 - -package sqlx - -import ( - "context" - "database/sql" -) - -// A union interface of contextPreparer and binder, required to be able to -// prepare named statements with context (as the bindtype must be determined). -type namedPreparerContext interface { - PreparerContext - binder -} - -func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) { - bindType := BindType(p.DriverName()) - q, args, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return nil, err - } - stmt, err := PreparexContext(ctx, p, q) - if err != nil { - return nil, err - } - return &NamedStmt{ - QueryString: q, - Params: args, - Stmt: stmt, - }, nil -} - -// ExecContext executes a named statement using the struct passed. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return *new(sql.Result), err - } - return n.Stmt.ExecContext(ctx, args...) -} - -// QueryContext executes a named statement using the struct argument, returning rows. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return nil, err - } - return n.Stmt.QueryContext(ctx, args...) -} - -// QueryRowContext executes a named statement against the database. 
Because sqlx cannot -// create a *sql.Row with an error condition pre-set for binding errors, sqlx -// returns a *sqlx.Row instead. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return &Row{err: err} - } - return n.Stmt.QueryRowxContext(ctx, args...) -} - -// MustExecContext execs a NamedStmt, panicing on error -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result { - res, err := n.ExecContext(ctx, arg) - if err != nil { - panic(err) - } - return res -} - -// QueryxContext using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) { - r, err := n.QueryContext(ctx, arg) - if err != nil { - return nil, err - } - return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err -} - -// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is -// an alias for QueryRow. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row { - return n.QueryRowContext(ctx, arg) -} - -// SelectContext using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error { - rows, err := n.QueryxContext(ctx, arg) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// GetContext using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error { - r := n.QueryRowxContext(ctx, arg) - return r.scanAny(dest, false) -} - -// NamedQueryContext binds a named query and then runs Query on the result using the -// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with -// map[string]interface{} types. -func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.QueryxContext(ctx, q, args...) -} - -// NamedExecContext uses BindStruct to get a query executable by the driver and -// then runs Exec on the result. Returns an error from the binding -// or the query excution itself. -func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.ExecContext(ctx, q, args...) 
-} diff --git a/vendor/github.com/jmoiron/sqlx/named_context_test.go b/vendor/github.com/jmoiron/sqlx/named_context_test.go deleted file mode 100644 index 87e94ac..0000000 --- a/vendor/github.com/jmoiron/sqlx/named_context_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// +build go1.8 - -package sqlx - -import ( - "context" - "database/sql" - "testing" -) - -func TestNamedContextQueries(t *testing.T) { - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - test := Test{t} - var ns *NamedStmt - var err error - - ctx := context.Background() - - // Check that invalid preparations fail - ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first:name") - if err == nil { - t.Error("Expected an error with invalid prepared statement.") - } - - ns, err = db.PrepareNamedContext(ctx, "invalid sql") - if err == nil { - t.Error("Expected an error with invalid prepared statement.") - } - - // Check closing works as anticipated - ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first_name") - test.Error(err) - err = ns.Close() - test.Error(err) - - ns, err = db.PrepareNamedContext(ctx, ` - SELECT first_name, last_name, email - FROM person WHERE first_name=:first_name AND email=:email`) - test.Error(err) - - // test Queryx w/ uses Query - p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"} - - rows, err := ns.QueryxContext(ctx, p) - test.Error(err) - for rows.Next() { - var p2 Person - rows.StructScan(&p2) - if p.FirstName != p2.FirstName { - t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName) - } - if p.LastName != p2.LastName { - t.Errorf("got %s, expected %s", p.LastName, p2.LastName) - } - if p.Email != p2.Email { - t.Errorf("got %s, expected %s", p.Email, p2.Email) - } - } - - // test Select - people := make([]Person, 0, 5) - err = ns.SelectContext(ctx, &people, p) - test.Error(err) - - if len(people) != 1 { - t.Errorf("got %d results, expected %d", len(people), 1) - } - if p.FirstName != people[0].FirstName { - t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName) - } - if p.LastName != people[0].LastName { - t.Errorf("got %s, expected %s", p.LastName, people[0].LastName) - } - if p.Email != people[0].Email { - t.Errorf("got %s, expected %s", p.Email, people[0].Email) - } - - // test Exec - ns, err = db.PrepareNamedContext(ctx, ` - INSERT INTO person (first_name, last_name, email) - VALUES (:first_name, :last_name, :email)`) - test.Error(err) - - js := Person{ - FirstName: "Julien", - LastName: "Savea", - Email: "jsavea@ab.co.nz", - } - _, err = ns.ExecContext(ctx, js) - test.Error(err) - - // Make sure we can pull him out again - p2 := Person{} - db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email) - if p2.Email != js.Email { - t.Errorf("expected %s, got %s", js.Email, p2.Email) - } - - // test Txn NamedStmts - tx := db.MustBeginTx(ctx, nil) - txns := tx.NamedStmtContext(ctx, ns) - - // We're going to add Steven in this txn - sl := Person{ - FirstName: "Steven", - LastName: "Luatua", - Email: "sluatua@ab.co.nz", - } - - _, err = txns.ExecContext(ctx, sl) - test.Error(err) - // then rollback... 
- tx.Rollback() - // looking for Steven after a rollback should fail - err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) - if err != sql.ErrNoRows { - t.Errorf("expected no rows error, got %v", err) - } - - // now do the same, but commit - tx = db.MustBeginTx(ctx, nil) - txns = tx.NamedStmtContext(ctx, ns) - _, err = txns.ExecContext(ctx, sl) - test.Error(err) - tx.Commit() - - // looking for Steven after a Commit should succeed - err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) - test.Error(err) - if p2.Email != sl.Email { - t.Errorf("expected %s, got %s", sl.Email, p2.Email) - } - - }) -} diff --git a/vendor/github.com/jmoiron/sqlx/named_test.go b/vendor/github.com/jmoiron/sqlx/named_test.go deleted file mode 100644 index d3459a8..0000000 --- a/vendor/github.com/jmoiron/sqlx/named_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package sqlx - -import ( - "database/sql" - "testing" -) - -func TestCompileQuery(t *testing.T) { - table := []struct { - Q, R, D, N string - V []string - }{ - // basic test for named parameters, invalid char ',' terminating - { - Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, - R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`, - D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`, - N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, - V: []string{"name", "age", "first", "last"}, - }, - // This query tests a named parameter ending the string as well as numbers - { - Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`, - R: `SELECT * FROM a WHERE first_name=? AND last_name=?`, - D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`, - N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`, - V: []string{"name1", "name2"}, - }, - { - Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`, - R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`, - D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`, - N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`, - V: []string{"name1", "name2"}, - }, - { - Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`, - R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`, - D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`, - N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`, - V: []string{"first_name", "last_name"}, - }, - /* This unicode awareness test sadly fails, because of our byte-wise worldview. 
- * We could certainly iterate by Rune instead, though it's a great deal slower, - * it's probably the RightWay(tm) - { - Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`, - R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`, - D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`, - N: []string{"name", "age", "first", "last"}, - }, - */ - } - - for _, test := range table { - qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION) - if err != nil { - t.Error(err) - } - if qr != test.R { - t.Errorf("expected %s, got %s", test.R, qr) - } - if len(names) != len(test.V) { - t.Errorf("expected %#v, got %#v", test.V, names) - } else { - for i, name := range names { - if name != test.V[i] { - t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name) - } - } - } - qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR) - if qd != test.D { - t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd) - } - - qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED) - if qq != test.N { - t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq)) - } - } -} - -type Test struct { - t *testing.T -} - -func (t Test) Error(err error, msg ...interface{}) { - if err != nil { - if len(msg) == 0 { - t.t.Error(err) - } else { - t.t.Error(msg...) - } - } -} - -func (t Test) Errorf(err error, format string, args ...interface{}) { - if err != nil { - t.t.Errorf(format, args...) - } -} - -func TestNamedQueries(t *testing.T) { - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - test := Test{t} - var ns *NamedStmt - var err error - - // Check that invalid preparations fail - ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name") - if err == nil { - t.Error("Expected an error with invalid prepared statement.") - } - - ns, err = db.PrepareNamed("invalid sql") - if err == nil { - t.Error("Expected an error with invalid prepared statement.") - } - - // Check closing works as anticipated - ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name") - test.Error(err) - err = ns.Close() - test.Error(err) - - ns, err = db.PrepareNamed(` - SELECT first_name, last_name, email - FROM person WHERE first_name=:first_name AND email=:email`) - test.Error(err) - - // test Queryx w/ uses Query - p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"} - - rows, err := ns.Queryx(p) - test.Error(err) - for rows.Next() { - var p2 Person - rows.StructScan(&p2) - if p.FirstName != p2.FirstName { - t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName) - } - if p.LastName != p2.LastName { - t.Errorf("got %s, expected %s", p.LastName, p2.LastName) - } - if p.Email != p2.Email { - t.Errorf("got %s, expected %s", p.Email, p2.Email) - } - } - - // test Select - people := make([]Person, 0, 5) - err = ns.Select(&people, p) - test.Error(err) - - if len(people) != 1 { - t.Errorf("got %d results, expected %d", len(people), 1) - } - if p.FirstName != people[0].FirstName { - t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName) - } - if p.LastName != people[0].LastName { - t.Errorf("got %s, expected %s", p.LastName, people[0].LastName) - } - if p.Email != people[0].Email { - t.Errorf("got %s, expected %s", p.Email, people[0].Email) - } - - // test Exec - ns, err = db.PrepareNamed(` - INSERT INTO person (first_name, last_name, email) - VALUES (:first_name, :last_name, :email)`) - test.Error(err) - - js := Person{ - FirstName: "Julien", - LastName: "Savea", - Email: "jsavea@ab.co.nz", 
- } - _, err = ns.Exec(js) - test.Error(err) - - // Make sure we can pull him out again - p2 := Person{} - db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email) - if p2.Email != js.Email { - t.Errorf("expected %s, got %s", js.Email, p2.Email) - } - - // test Txn NamedStmts - tx := db.MustBegin() - txns := tx.NamedStmt(ns) - - // We're going to add Steven in this txn - sl := Person{ - FirstName: "Steven", - LastName: "Luatua", - Email: "sluatua@ab.co.nz", - } - - _, err = txns.Exec(sl) - test.Error(err) - // then rollback... - tx.Rollback() - // looking for Steven after a rollback should fail - err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) - if err != sql.ErrNoRows { - t.Errorf("expected no rows error, got %v", err) - } - - // now do the same, but commit - tx = db.MustBegin() - txns = tx.NamedStmt(ns) - _, err = txns.Exec(sl) - test.Error(err) - tx.Commit() - - // looking for Steven after a Commit should succeed - err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) - test.Error(err) - if p2.Email != sl.Email { - t.Errorf("expected %s, got %s", sl.Email, p2.Email) - } - - }) -} diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md deleted file mode 100644 index f01d3d1..0000000 --- a/vendor/github.com/jmoiron/sqlx/reflectx/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# reflectx - -The sqlx package has special reflect needs. In particular, it needs to: - -* be able to map a name to a field -* understand embedded structs -* understand mapping names to fields by a particular tag -* user specified name -> field mapping functions - -These behaviors mimic the behaviors by the standard library marshallers and also the -behavior of standard Go accessors. - -The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is -addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct -tags in the ways that are vital to most marshallers, and they are slow. - -This reflectx package extends reflect to achieve these goals. diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go deleted file mode 100644 index 73c21eb..0000000 --- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go +++ /dev/null @@ -1,441 +0,0 @@ -// Package reflectx implements extensions to the standard reflect lib suitable -// for implementing marshalling and unmarshalling packages. The main Mapper type -// allows for Go-compatible named attribute access, including accessing embedded -// struct attributes and the ability to use functions and struct tags to -// customize field names. -// -package reflectx - -import ( - "reflect" - "runtime" - "strings" - "sync" -) - -// A FieldInfo is metadata for a struct field. -type FieldInfo struct { - Index []int - Path string - Field reflect.StructField - Zero reflect.Value - Name string - Options map[string]string - Embedded bool - Children []*FieldInfo - Parent *FieldInfo -} - -// A StructMap is an index of field metadata for a struct. -type StructMap struct { - Tree *FieldInfo - Index []*FieldInfo - Paths map[string]*FieldInfo - Names map[string]*FieldInfo -} - -// GetByPath returns a *FieldInfo for a given string path. -func (f StructMap) GetByPath(path string) *FieldInfo { - return f.Paths[path] -} - -// GetByTraversal returns a *FieldInfo for a given integer path. 
It is -// analogous to reflect.FieldByIndex, but using the cached traversal -// rather than re-executing the reflect machinery each time. -func (f StructMap) GetByTraversal(index []int) *FieldInfo { - if len(index) == 0 { - return nil - } - - tree := f.Tree - for _, i := range index { - if i >= len(tree.Children) || tree.Children[i] == nil { - return nil - } - tree = tree.Children[i] - } - return tree -} - -// Mapper is a general purpose mapper of names to struct fields. A Mapper -// behaves like most marshallers in the standard library, obeying a field tag -// for name mapping but also providing a basic transform function. -type Mapper struct { - cache map[reflect.Type]*StructMap - tagName string - tagMapFunc func(string) string - mapFunc func(string) string - mutex sync.Mutex -} - -// NewMapper returns a new mapper using the tagName as its struct field tag. -// If tagName is the empty string, it is ignored. -func NewMapper(tagName string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - } -} - -// NewMapperTagFunc returns a new mapper which contains a mapper for field names -// AND a mapper for tag values. This is useful for tags like json which can -// have values like "name,omitempty". -func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - mapFunc: mapFunc, - tagMapFunc: tagMapFunc, - } -} - -// NewMapperFunc returns a new mapper which optionally obeys a field tag and -// a struct field name mapper func given by f. Tags will take precedence, but -// for any other field, the mapped name will be f(field.Name) -func NewMapperFunc(tagName string, f func(string) string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - mapFunc: f, - } -} - -// TypeMap returns a mapping of field strings to int slices representing -// the traversal down the struct to reach the field. -func (m *Mapper) TypeMap(t reflect.Type) *StructMap { - m.mutex.Lock() - mapping, ok := m.cache[t] - if !ok { - mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) - m.cache[t] = mapping - } - m.mutex.Unlock() - return mapping -} - -// FieldMap returns the mapper's mapping of field names to reflect values. Panics -// if v's Kind is not Struct, or v is not Indirectable to a struct kind. -func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - r := map[string]reflect.Value{} - tm := m.TypeMap(v.Type()) - for tagName, fi := range tm.Names { - r[tagName] = FieldByIndexes(v, fi.Index) - } - return r -} - -// FieldByName returns a field by its mapped name as a reflect.Value. -// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. -// Returns zero Value if the name is not found. -func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - tm := m.TypeMap(v.Type()) - fi, ok := tm.Names[name] - if !ok { - return v - } - return FieldByIndexes(v, fi.Index) -} - -// FieldsByName returns a slice of values corresponding to the slice of names -// for the value. Panics if v's Kind is not Struct or v is not Indirectable -// to a struct Kind. Returns zero Value for each name not found. 
-func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - tm := m.TypeMap(v.Type()) - vals := make([]reflect.Value, 0, len(names)) - for _, name := range names { - fi, ok := tm.Names[name] - if !ok { - vals = append(vals, *new(reflect.Value)) - } else { - vals = append(vals, FieldByIndexes(v, fi.Index)) - } - } - return vals -} - -// TraversalsByName returns a slice of int slices which represent the struct -// traversals for each mapped name. Panics if t is not a struct or Indirectable -// to a struct. Returns empty int slice for each name not found. -func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { - r := make([][]int, 0, len(names)) - m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { - if i == nil { - r = append(r, []int{}) - } else { - r = append(r, i) - } - - return nil - }) - return r -} - -// TraversalsByNameFunc traverses the mapped names and calls fn with the index of -// each name and the struct traversal represented by that name. Panics if t is not -// a struct or Indirectable to a struct. Returns the first error returned by fn or nil. -func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error { - t = Deref(t) - mustBe(t, reflect.Struct) - tm := m.TypeMap(t) - for i, name := range names { - fi, ok := tm.Names[name] - if !ok { - if err := fn(i, nil); err != nil { - return err - } - } else { - if err := fn(i, fi.Index); err != nil { - return err - } - } - } - return nil -} - -// FieldByIndexes returns a value for the field given by the struct traversal -// for the given value. -func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { - for _, i := range indexes { - v = reflect.Indirect(v).Field(i) - // if this is a pointer and it's nil, allocate a new value and set it - if v.Kind() == reflect.Ptr && v.IsNil() { - alloc := reflect.New(Deref(v.Type())) - v.Set(alloc) - } - if v.Kind() == reflect.Map && v.IsNil() { - v.Set(reflect.MakeMap(v.Type())) - } - } - return v -} - -// FieldByIndexesReadOnly returns a value for a particular struct traversal, -// but is not concerned with allocating nil pointers because the value is -// going to be used for reading and not setting. -func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value { - for _, i := range indexes { - v = reflect.Indirect(v).Field(i) - } - return v -} - -// Deref is Indirect for reflect.Types -func Deref(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -// -- helpers & utilities -- - -type kinder interface { - Kind() reflect.Kind -} - -// mustBe checks a value against a kind, panicing with a reflect.ValueError -// if the kind isn't that which is required. -func mustBe(v kinder, expected reflect.Kind) { - if k := v.Kind(); k != expected { - panic(&reflect.ValueError{Method: methodName(), Kind: k}) - } -} - -// methodName returns the caller of the function calling methodName -func methodName() string { - pc, _, _, _ := runtime.Caller(2) - f := runtime.FuncForPC(pc) - if f == nil { - return "unknown method" - } - return f.Name() -} - -type typeQueue struct { - t reflect.Type - fi *FieldInfo - pp string // Parent path -} - -// A copying append that creates a new slice each time. 
-func apnd(is []int, i int) []int { - x := make([]int, len(is)+1) - for p, n := range is { - x[p] = n - } - x[len(x)-1] = i - return x -} - -type mapf func(string) string - -// parseName parses the tag and the target name for the given field using -// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the -// field's name to a target name, and tagMapFunc for mapping the tag to -// a target name. -func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { - // first, set the fieldName to the field's name - fieldName = field.Name - // if a mapFunc is set, use that to override the fieldName - if mapFunc != nil { - fieldName = mapFunc(fieldName) - } - - // if there's no tag to look for, return the field name - if tagName == "" { - return "", fieldName - } - - // if this tag is not set using the normal convention in the tag, - // then return the fieldname.. this check is done because according - // to the reflect documentation: - // If the tag does not have the conventional format, - // the value returned by Get is unspecified. - // which doesn't sound great. - if !strings.Contains(string(field.Tag), tagName+":") { - return "", fieldName - } - - // at this point we're fairly sure that we have a tag, so lets pull it out - tag = field.Tag.Get(tagName) - - // if we have a mapper function, call it on the whole tag - // XXX: this is a change from the old version, which pulled out the name - // before the tagMapFunc could be run, but I think this is the right way - if tagMapFunc != nil { - tag = tagMapFunc(tag) - } - - // finally, split the options from the name - parts := strings.Split(tag, ",") - fieldName = parts[0] - - return tag, fieldName -} - -// parseOptions parses options out of a tag string, skipping the name -func parseOptions(tag string) map[string]string { - parts := strings.Split(tag, ",") - options := make(map[string]string, len(parts)) - if len(parts) > 1 { - for _, opt := range parts[1:] { - // short circuit potentially expensive split op - if strings.Contains(opt, "=") { - kv := strings.Split(opt, "=") - options[kv[0]] = kv[1] - continue - } - options[opt] = "" - } - } - return options -} - -// getMapping returns a mapping for the t type, using the tagName, mapFunc and -// tagMapFunc to determine the canonical names of fields. 
-func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { - m := []*FieldInfo{} - - root := &FieldInfo{} - queue := []typeQueue{} - queue = append(queue, typeQueue{Deref(t), root, ""}) - -QueueLoop: - for len(queue) != 0 { - // pop the first item off of the queue - tq := queue[0] - queue = queue[1:] - - // ignore recursive field - for p := tq.fi.Parent; p != nil; p = p.Parent { - if tq.fi.Field.Type == p.Field.Type { - continue QueueLoop - } - } - - nChildren := 0 - if tq.t.Kind() == reflect.Struct { - nChildren = tq.t.NumField() - } - tq.fi.Children = make([]*FieldInfo, nChildren) - - // iterate through all of its fields - for fieldPos := 0; fieldPos < nChildren; fieldPos++ { - - f := tq.t.Field(fieldPos) - - // parse the tag and the target name using the mapping options for this field - tag, name := parseName(f, tagName, mapFunc, tagMapFunc) - - // if the name is "-", disabled via a tag, skip it - if name == "-" { - continue - } - - fi := FieldInfo{ - Field: f, - Name: name, - Zero: reflect.New(f.Type).Elem(), - Options: parseOptions(tag), - } - - // if the path is empty this path is just the name - if tq.pp == "" { - fi.Path = fi.Name - } else { - fi.Path = tq.pp + "." + fi.Name - } - - // skip unexported fields - if len(f.PkgPath) != 0 && !f.Anonymous { - continue - } - - // bfs search of anonymous embedded structs - if f.Anonymous { - pp := tq.pp - if tag != "" { - pp = fi.Path - } - - fi.Embedded = true - fi.Index = apnd(tq.fi.Index, fieldPos) - nChildren := 0 - ft := Deref(f.Type) - if ft.Kind() == reflect.Struct { - nChildren = ft.NumField() - } - fi.Children = make([]*FieldInfo, nChildren) - queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) - } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { - fi.Index = apnd(tq.fi.Index, fieldPos) - fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) - queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) - } - - fi.Index = apnd(tq.fi.Index, fieldPos) - fi.Parent = tq.fi - tq.fi.Children[fieldPos] = &fi - m = append(m, &fi) - } - } - - flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} - for _, fi := range flds.Index { - flds.Paths[fi.Path] = fi - if fi.Name != "" && !fi.Embedded { - flds.Names[fi.Path] = fi - } - } - - return flds -} diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go deleted file mode 100644 index d3879ed..0000000 --- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go +++ /dev/null @@ -1,974 +0,0 @@ -package reflectx - -import ( - "reflect" - "strings" - "testing" -) - -func ival(v reflect.Value) int { - return v.Interface().(int) -} - -func TestBasic(t *testing.T) { - type Foo struct { - A int - B int - C int - } - - f := Foo{1, 2, 3} - fv := reflect.ValueOf(f) - m := NewMapperFunc("", func(s string) string { return s }) - - v := m.FieldByName(fv, "A") - if ival(v) != f.A { - t.Errorf("Expecting %d, got %d", ival(v), f.A) - } - v = m.FieldByName(fv, "B") - if ival(v) != f.B { - t.Errorf("Expecting %d, got %d", f.B, ival(v)) - } - v = m.FieldByName(fv, "C") - if ival(v) != f.C { - t.Errorf("Expecting %d, got %d", f.C, ival(v)) - } -} - -func TestBasicEmbedded(t *testing.T) { - type Foo struct { - A int - } - - type Bar struct { - Foo // `db:""` is implied for an embedded struct - B int - C int `db:"-"` - } - - type Baz struct { - A int - Bar `db:"Bar"` - } - - m := 
NewMapperFunc("db", func(s string) string { return s }) - - z := Baz{} - z.A = 1 - z.B = 2 - z.C = 4 - z.Bar.Foo.A = 3 - - zv := reflect.ValueOf(z) - fields := m.TypeMap(reflect.TypeOf(z)) - - if len(fields.Index) != 5 { - t.Errorf("Expecting 5 fields") - } - - // for _, fi := range fields.Index { - // log.Println(fi) - // } - - v := m.FieldByName(zv, "A") - if ival(v) != z.A { - t.Errorf("Expecting %d, got %d", z.A, ival(v)) - } - v = m.FieldByName(zv, "Bar.B") - if ival(v) != z.Bar.B { - t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v)) - } - v = m.FieldByName(zv, "Bar.A") - if ival(v) != z.Bar.Foo.A { - t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v)) - } - v = m.FieldByName(zv, "Bar.C") - if _, ok := v.Interface().(int); ok { - t.Errorf("Expecting Bar.C to not exist") - } - - fi := fields.GetByPath("Bar.C") - if fi != nil { - t.Errorf("Bar.C should not exist") - } -} - -func TestEmbeddedSimple(t *testing.T) { - type UUID [16]byte - type MyID struct { - UUID - } - type Item struct { - ID MyID - } - z := Item{} - - m := NewMapper("db") - m.TypeMap(reflect.TypeOf(z)) -} - -func TestBasicEmbeddedWithTags(t *testing.T) { - type Foo struct { - A int `db:"a"` - } - - type Bar struct { - Foo // `db:""` is implied for an embedded struct - B int `db:"b"` - } - - type Baz struct { - A int `db:"a"` - Bar // `db:""` is implied for an embedded struct - } - - m := NewMapper("db") - - z := Baz{} - z.A = 1 - z.B = 2 - z.Bar.Foo.A = 3 - - zv := reflect.ValueOf(z) - fields := m.TypeMap(reflect.TypeOf(z)) - - if len(fields.Index) != 5 { - t.Errorf("Expecting 5 fields") - } - - // for _, fi := range fields.index { - // log.Println(fi) - // } - - v := m.FieldByName(zv, "a") - if ival(v) != z.Bar.Foo.A { // the dominant field - t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v)) - } - v = m.FieldByName(zv, "b") - if ival(v) != z.B { - t.Errorf("Expecting %d, got %d", z.B, ival(v)) - } -} - -func TestFlatTags(t *testing.T) { - m := NewMapper("db") - - type Asset struct { - Title string `db:"title"` - } - type Post struct { - Author string `db:"author,required"` - Asset Asset `db:""` - } - // Post columns: (author title) - - post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}} - pv := reflect.ValueOf(post) - - v := m.FieldByName(pv, "author") - if v.Interface().(string) != post.Author { - t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) - } - v = m.FieldByName(pv, "title") - if v.Interface().(string) != post.Asset.Title { - t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) - } -} - -func TestNestedStruct(t *testing.T) { - m := NewMapper("db") - - type Details struct { - Active bool `db:"active"` - } - type Asset struct { - Title string `db:"title"` - Details Details `db:"details"` - } - type Post struct { - Author string `db:"author,required"` - Asset `db:"asset"` - } - // Post columns: (author asset.title asset.details.active) - - post := Post{ - Author: "Joe", - Asset: Asset{Title: "Hello", Details: Details{Active: true}}, - } - pv := reflect.ValueOf(post) - - v := m.FieldByName(pv, "author") - if v.Interface().(string) != post.Author { - t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) - } - v = m.FieldByName(pv, "title") - if _, ok := v.Interface().(string); ok { - t.Errorf("Expecting field to not exist") - } - v = m.FieldByName(pv, "asset.title") - if v.Interface().(string) != post.Asset.Title { - t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) - } - v = m.FieldByName(pv, "asset.details.active") - 
if v.Interface().(bool) != post.Asset.Details.Active { - t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool)) - } -} - -func TestInlineStruct(t *testing.T) { - m := NewMapperTagFunc("db", strings.ToLower, nil) - - type Employee struct { - Name string - ID int - } - type Boss Employee - type person struct { - Employee `db:"employee"` - Boss `db:"boss"` - } - // employees columns: (employee.name employee.id boss.name boss.id) - - em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}} - ev := reflect.ValueOf(em) - - fields := m.TypeMap(reflect.TypeOf(em)) - if len(fields.Index) != 6 { - t.Errorf("Expecting 6 fields") - } - - v := m.FieldByName(ev, "employee.name") - if v.Interface().(string) != em.Employee.Name { - t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string)) - } - v = m.FieldByName(ev, "boss.id") - if ival(v) != em.Boss.ID { - t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v)) - } -} - -func TestRecursiveStruct(t *testing.T) { - type Person struct { - Parent *Person - } - m := NewMapperFunc("db", strings.ToLower) - var p *Person - m.TypeMap(reflect.TypeOf(p)) -} - -func TestFieldsEmbedded(t *testing.T) { - m := NewMapper("db") - - type Person struct { - Name string `db:"name,size=64"` - } - type Place struct { - Name string `db:"name"` - } - type Article struct { - Title string `db:"title"` - } - type PP struct { - Person `db:"person,required"` - Place `db:",someflag"` - Article `db:",required"` - } - // PP columns: (person.name name title) - - pp := PP{} - pp.Person.Name = "Peter" - pp.Place.Name = "Toronto" - pp.Article.Title = "Best city ever" - - fields := m.TypeMap(reflect.TypeOf(pp)) - // for i, f := range fields { - // log.Println(i, f) - // } - - ppv := reflect.ValueOf(pp) - - v := m.FieldByName(ppv, "person.name") - if v.Interface().(string) != pp.Person.Name { - t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string)) - } - - v = m.FieldByName(ppv, "name") - if v.Interface().(string) != pp.Place.Name { - t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string)) - } - - v = m.FieldByName(ppv, "title") - if v.Interface().(string) != pp.Article.Title { - t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string)) - } - - fi := fields.GetByPath("person") - if _, ok := fi.Options["required"]; !ok { - t.Errorf("Expecting required option to be set") - } - if !fi.Embedded { - t.Errorf("Expecting field to be embedded") - } - if len(fi.Index) != 1 || fi.Index[0] != 0 { - t.Errorf("Expecting index to be [0]") - } - - fi = fields.GetByPath("person.name") - if fi == nil { - t.Errorf("Expecting person.name to exist") - } - if fi.Path != "person.name" { - t.Errorf("Expecting %s, got %s", "person.name", fi.Path) - } - if fi.Options["size"] != "64" { - t.Errorf("Expecting %s, got %s", "64", fi.Options["size"]) - } - - fi = fields.GetByTraversal([]int{1, 0}) - if fi == nil { - t.Errorf("Expecting traveral to exist") - } - if fi.Path != "name" { - t.Errorf("Expecting %s, got %s", "name", fi.Path) - } - - fi = fields.GetByTraversal([]int{2}) - if fi == nil { - t.Errorf("Expecting traversal to exist") - } - if _, ok := fi.Options["required"]; !ok { - t.Errorf("Expecting required option to be set") - } - - trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"}) - if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) { - t.Errorf("Expecting traversal: %v", trs) - } -} - -func TestPtrFields(t *testing.T) { - m := 
NewMapperTagFunc("db", strings.ToLower, nil) - type Asset struct { - Title string - } - type Post struct { - *Asset `db:"asset"` - Author string - } - - post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}} - pv := reflect.ValueOf(post) - - fields := m.TypeMap(reflect.TypeOf(post)) - if len(fields.Index) != 3 { - t.Errorf("Expecting 3 fields") - } - - v := m.FieldByName(pv, "asset.title") - if v.Interface().(string) != post.Asset.Title { - t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) - } - v = m.FieldByName(pv, "author") - if v.Interface().(string) != post.Author { - t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) - } -} - -func TestNamedPtrFields(t *testing.T) { - m := NewMapperTagFunc("db", strings.ToLower, nil) - - type User struct { - Name string - } - - type Asset struct { - Title string - - Owner *User `db:"owner"` - } - type Post struct { - Author string - - Asset1 *Asset `db:"asset1"` - Asset2 *Asset `db:"asset2"` - } - - post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil - pv := reflect.ValueOf(post) - - fields := m.TypeMap(reflect.TypeOf(post)) - if len(fields.Index) != 9 { - t.Errorf("Expecting 9 fields") - } - - v := m.FieldByName(pv, "asset1.title") - if v.Interface().(string) != post.Asset1.Title { - t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string)) - } - v = m.FieldByName(pv, "asset1.owner.name") - if v.Interface().(string) != post.Asset1.Owner.Name { - t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string)) - } - v = m.FieldByName(pv, "asset2.title") - if v.Interface().(string) != post.Asset2.Title { - t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string)) - } - v = m.FieldByName(pv, "asset2.owner.name") - if v.Interface().(string) != post.Asset2.Owner.Name { - t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string)) - } - v = m.FieldByName(pv, "author") - if v.Interface().(string) != post.Author { - t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) - } -} - -func TestFieldMap(t *testing.T) { - type Foo struct { - A int - B int - C int - } - - f := Foo{1, 2, 3} - m := NewMapperFunc("db", strings.ToLower) - - fm := m.FieldMap(reflect.ValueOf(f)) - - if len(fm) != 3 { - t.Errorf("Expecting %d keys, got %d", 3, len(fm)) - } - if fm["a"].Interface().(int) != 1 { - t.Errorf("Expecting %d, got %d", 1, ival(fm["a"])) - } - if fm["b"].Interface().(int) != 2 { - t.Errorf("Expecting %d, got %d", 2, ival(fm["b"])) - } - if fm["c"].Interface().(int) != 3 { - t.Errorf("Expecting %d, got %d", 3, ival(fm["c"])) - } -} - -func TestTagNameMapping(t *testing.T) { - type Strategy struct { - StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"` - StrategyName string - } - - m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string { - if strings.Contains(value, ",") { - return strings.Split(value, ",")[0] - } - return value - }) - strategy := Strategy{"1", "Alpah"} - mapping := m.TypeMap(reflect.TypeOf(strategy)) - - for _, key := range []string{"strategy_id", "STRATEGYNAME"} { - if fi := mapping.GetByPath(key); fi == nil { - t.Errorf("Expecting to find key %s in mapping but did not.", key) - } - } -} - -func TestMapping(t *testing.T) { - type Person struct { - ID int - Name string - WearsGlasses bool `db:"wears_glasses"` - } - - m := NewMapperFunc("db", strings.ToLower) - p := Person{1, "Jason", true} - mapping := 
m.TypeMap(reflect.TypeOf(p)) - - for _, key := range []string{"id", "name", "wears_glasses"} { - if fi := mapping.GetByPath(key); fi == nil { - t.Errorf("Expecting to find key %s in mapping but did not.", key) - } - } - - type SportsPerson struct { - Weight int - Age int - Person - } - s := SportsPerson{Weight: 100, Age: 30, Person: p} - mapping = m.TypeMap(reflect.TypeOf(s)) - for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} { - if fi := mapping.GetByPath(key); fi == nil { - t.Errorf("Expecting to find key %s in mapping but did not.", key) - } - } - - type RugbyPlayer struct { - Position int - IsIntense bool `db:"is_intense"` - IsAllBlack bool `db:"-"` - SportsPerson - } - r := RugbyPlayer{12, true, false, s} - mapping = m.TypeMap(reflect.TypeOf(r)) - for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} { - if fi := mapping.GetByPath(key); fi == nil { - t.Errorf("Expecting to find key %s in mapping but did not.", key) - } - } - - if fi := mapping.GetByPath("isallblack"); fi != nil { - t.Errorf("Expecting to ignore `IsAllBlack` field") - } -} - -func TestGetByTraversal(t *testing.T) { - type C struct { - C0 int - C1 int - } - type B struct { - B0 string - B1 *C - } - type A struct { - A0 int - A1 B - } - - testCases := []struct { - Index []int - ExpectedName string - ExpectNil bool - }{ - { - Index: []int{0}, - ExpectedName: "A0", - }, - { - Index: []int{1, 0}, - ExpectedName: "B0", - }, - { - Index: []int{1, 1, 1}, - ExpectedName: "C1", - }, - { - Index: []int{3, 4, 5}, - ExpectNil: true, - }, - { - Index: []int{}, - ExpectNil: true, - }, - { - Index: nil, - ExpectNil: true, - }, - } - - m := NewMapperFunc("db", func(n string) string { return n }) - tm := m.TypeMap(reflect.TypeOf(A{})) - - for i, tc := range testCases { - fi := tm.GetByTraversal(tc.Index) - if tc.ExpectNil { - if fi != nil { - t.Errorf("%d: expected nil, got %v", i, fi) - } - continue - } - - if fi == nil { - t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName) - continue - } - - if fi.Name != tc.ExpectedName { - t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name) - } - } -} - -// TestMapperMethodsByName tests Mapper methods FieldByName and TraversalsByName -func TestMapperMethodsByName(t *testing.T) { - type C struct { - C0 string - C1 int - } - type B struct { - B0 *C `db:"B0"` - B1 C `db:"B1"` - B2 string `db:"B2"` - } - type A struct { - A0 *B `db:"A0"` - B `db:"A1"` - A2 int - a3 int - } - - val := &A{ - A0: &B{ - B0: &C{C0: "0", C1: 1}, - B1: C{C0: "2", C1: 3}, - B2: "4", - }, - B: B{ - B0: nil, - B1: C{C0: "5", C1: 6}, - B2: "7", - }, - A2: 8, - } - - testCases := []struct { - Name string - ExpectInvalid bool - ExpectedValue interface{} - ExpectedIndexes []int - }{ - { - Name: "A0.B0.C0", - ExpectedValue: "0", - ExpectedIndexes: []int{0, 0, 0}, - }, - { - Name: "A0.B0.C1", - ExpectedValue: 1, - ExpectedIndexes: []int{0, 0, 1}, - }, - { - Name: "A0.B1.C0", - ExpectedValue: "2", - ExpectedIndexes: []int{0, 1, 0}, - }, - { - Name: "A0.B1.C1", - ExpectedValue: 3, - ExpectedIndexes: []int{0, 1, 1}, - }, - { - Name: "A0.B2", - ExpectedValue: "4", - ExpectedIndexes: []int{0, 2}, - }, - { - Name: "A1.B0.C0", - ExpectedValue: "", - ExpectedIndexes: []int{1, 0, 0}, - }, - { - Name: "A1.B0.C1", - ExpectedValue: 0, - ExpectedIndexes: []int{1, 0, 1}, - }, - { - Name: "A1.B1.C0", - ExpectedValue: "5", - ExpectedIndexes: []int{1, 1, 0}, - }, - { - Name: "A1.B1.C1", - ExpectedValue: 6, - ExpectedIndexes: []int{1, 1, 1}, - }, - { - Name: 
"A1.B2", - ExpectedValue: "7", - ExpectedIndexes: []int{1, 2}, - }, - { - Name: "A2", - ExpectedValue: 8, - ExpectedIndexes: []int{2}, - }, - { - Name: "XYZ", - ExpectInvalid: true, - ExpectedIndexes: []int{}, - }, - { - Name: "a3", - ExpectInvalid: true, - ExpectedIndexes: []int{}, - }, - } - - // build the names array from the test cases - names := make([]string, len(testCases)) - for i, tc := range testCases { - names[i] = tc.Name - } - m := NewMapperFunc("db", func(n string) string { return n }) - v := reflect.ValueOf(val) - values := m.FieldsByName(v, names) - if len(values) != len(testCases) { - t.Errorf("expected %d values, got %d", len(testCases), len(values)) - t.FailNow() - } - indexes := m.TraversalsByName(v.Type(), names) - if len(indexes) != len(testCases) { - t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes)) - t.FailNow() - } - for i, val := range values { - tc := testCases[i] - traversal := indexes[i] - if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) { - t.Errorf("expected %v, got %v", tc.ExpectedIndexes, traversal) - t.FailNow() - } - val = reflect.Indirect(val) - if tc.ExpectInvalid { - if val.IsValid() { - t.Errorf("%d: expected zero value, got %v", i, val) - } - continue - } - if !val.IsValid() { - t.Errorf("%d: expected valid value, got %v", i, val) - continue - } - actualValue := reflect.Indirect(val).Interface() - if !reflect.DeepEqual(tc.ExpectedValue, actualValue) { - t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue) - } - } -} - -func TestFieldByIndexes(t *testing.T) { - type C struct { - C0 bool - C1 string - C2 int - C3 map[string]int - } - type B struct { - B1 C - B2 *C - } - type A struct { - A1 B - A2 *B - } - testCases := []struct { - value interface{} - indexes []int - expectedValue interface{} - readOnly bool - }{ - { - value: A{ - A1: B{B1: C{C0: true}}, - }, - indexes: []int{0, 0, 0}, - expectedValue: true, - readOnly: true, - }, - { - value: A{ - A2: &B{B2: &C{C1: "answer"}}, - }, - indexes: []int{1, 1, 1}, - expectedValue: "answer", - readOnly: true, - }, - { - value: &A{}, - indexes: []int{1, 1, 3}, - expectedValue: map[string]int{}, - }, - } - - for i, tc := range testCases { - checkResults := func(v reflect.Value) { - if tc.expectedValue == nil { - if !v.IsNil() { - t.Errorf("%d: expected nil, actual %v", i, v.Interface()) - } - } else { - if !reflect.DeepEqual(tc.expectedValue, v.Interface()) { - t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface()) - } - } - } - - checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes)) - if tc.readOnly { - checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes)) - } - } -} - -func TestMustBe(t *testing.T) { - typ := reflect.TypeOf(E1{}) - mustBe(typ, reflect.Struct) - - defer func() { - if r := recover(); r != nil { - valueErr, ok := r.(*reflect.ValueError) - if !ok { - t.Errorf("unexpected Method: %s", valueErr.Method) - t.Error("expected panic with *reflect.ValueError") - return - } - if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" { - } - if valueErr.Kind != reflect.String { - t.Errorf("unexpected Kind: %s", valueErr.Kind) - } - } else { - t.Error("expected panic") - } - }() - - typ = reflect.TypeOf("string") - mustBe(typ, reflect.Struct) - t.Error("got here, didn't expect to") -} - -type E1 struct { - A int -} -type E2 struct { - E1 - B int -} -type E3 struct { - E2 - C int -} -type E4 struct { - E3 - D int -} - -func BenchmarkFieldNameL1(b *testing.B) { - e4 := E4{D: 1} - for i := 0; i < b.N; 
i++ { - v := reflect.ValueOf(e4) - f := v.FieldByName("D") - if f.Interface().(int) != 1 { - b.Fatal("Wrong value.") - } - } -} - -func BenchmarkFieldNameL4(b *testing.B) { - e4 := E4{} - e4.A = 1 - for i := 0; i < b.N; i++ { - v := reflect.ValueOf(e4) - f := v.FieldByName("A") - if f.Interface().(int) != 1 { - b.Fatal("Wrong value.") - } - } -} - -func BenchmarkFieldPosL1(b *testing.B) { - e4 := E4{D: 1} - for i := 0; i < b.N; i++ { - v := reflect.ValueOf(e4) - f := v.Field(1) - if f.Interface().(int) != 1 { - b.Fatal("Wrong value.") - } - } -} - -func BenchmarkFieldPosL4(b *testing.B) { - e4 := E4{} - e4.A = 1 - for i := 0; i < b.N; i++ { - v := reflect.ValueOf(e4) - f := v.Field(0) - f = f.Field(0) - f = f.Field(0) - f = f.Field(0) - if f.Interface().(int) != 1 { - b.Fatal("Wrong value.") - } - } -} - -func BenchmarkFieldByIndexL4(b *testing.B) { - e4 := E4{} - e4.A = 1 - idx := []int{0, 0, 0, 0} - for i := 0; i < b.N; i++ { - v := reflect.ValueOf(e4) - f := FieldByIndexes(v, idx) - if f.Interface().(int) != 1 { - b.Fatal("Wrong value.") - } - } -} - -func BenchmarkTraversalsByName(b *testing.B) { - type A struct { - Value int - } - - type B struct { - A A - } - - type C struct { - B B - } - - type D struct { - C C - } - - m := NewMapper("") - t := reflect.TypeOf(D{}) - names := []string{"C", "B", "A", "Value"} - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if l := len(m.TraversalsByName(t, names)); l != len(names) { - b.Errorf("expected %d values, got %d", len(names), l) - } - } -} - -func BenchmarkTraversalsByNameFunc(b *testing.B) { - type A struct { - Z int - } - - type B struct { - A A - } - - type C struct { - B B - } - - type D struct { - C C - } - - m := NewMapper("") - t := reflect.TypeOf(D{}) - names := []string{"C", "B", "A", "Z", "Y"} - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - var l int - - if err := m.TraversalsByNameFunc(t, names, func(_ int, _ []int) error { - l++ - return nil - }); err != nil { - b.Errorf("unexpected error %s", err) - } - - if l != len(names) { - b.Errorf("expected %d values, got %d", len(names), l) - } - } -} \ No newline at end of file diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go deleted file mode 100644 index e95f23f..0000000 --- a/vendor/github.com/jmoiron/sqlx/sqlx.go +++ /dev/null @@ -1,1039 +0,0 @@ -package sqlx - -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - - "io/ioutil" - "path/filepath" - "reflect" - "strings" - "sync" - - "github.com/jmoiron/sqlx/reflectx" -) - -// Although the NameMapper is convenient, in practice it should not -// be relied on except for application code. If you are writing a library -// that uses sqlx, you should be aware that the name mappings you expect -// can be overridden by your user's application. - -// NameMapper is used to map column names to struct field names. By default, -// it uses strings.ToLower to lowercase struct field names. It can be set -// to whatever you want, but it is encouraged to be set before sqlx is used -// as name-to-field mappings are cached after first use on a type. -var NameMapper = strings.ToLower -var origMapper = reflect.ValueOf(NameMapper) - -// Rather than creating on init, this is created when necessary so that -// importers have time to customize the NameMapper. -var mpr *reflectx.Mapper - -// mprMu protects mpr. -var mprMu sync.Mutex - -// mapper returns a valid mapper using the configured NameMapper func. 
-func mapper() *reflectx.Mapper { - mprMu.Lock() - defer mprMu.Unlock() - - if mpr == nil { - mpr = reflectx.NewMapperFunc("db", NameMapper) - } else if origMapper != reflect.ValueOf(NameMapper) { - // if NameMapper has changed, create a new mapper - mpr = reflectx.NewMapperFunc("db", NameMapper) - origMapper = reflect.ValueOf(NameMapper) - } - return mpr -} - -// isScannable takes the reflect.Type and the actual dest value and returns -// whether or not it's Scannable. Something is scannable if: -// * it is not a struct -// * it implements sql.Scanner -// * it has no exported fields -func isScannable(t reflect.Type) bool { - if reflect.PtrTo(t).Implements(_scannerInterface) { - return true - } - if t.Kind() != reflect.Struct { - return true - } - - // it's not important that we use the right mapper for this particular object, - // we're only concerned on how many exported fields this struct has - m := mapper() - if len(m.TypeMap(t).Index) == 0 { - return true - } - return false -} - -// ColScanner is an interface used by MapScan and SliceScan -type ColScanner interface { - Columns() ([]string, error) - Scan(dest ...interface{}) error - Err() error -} - -// Queryer is an interface used by Get and Select -type Queryer interface { - Query(query string, args ...interface{}) (*sql.Rows, error) - Queryx(query string, args ...interface{}) (*Rows, error) - QueryRowx(query string, args ...interface{}) *Row -} - -// Execer is an interface used by MustExec and LoadFile -type Execer interface { - Exec(query string, args ...interface{}) (sql.Result, error) -} - -// Binder is an interface for something which can bind queries (Tx, DB) -type binder interface { - DriverName() string - Rebind(string) string - BindNamed(string, interface{}) (string, []interface{}, error) -} - -// Ext is a union interface which can bind, query, and exec, used by -// NamedQuery and NamedExec. -type Ext interface { - binder - Queryer - Execer -} - -// Preparer is an interface used by Preparex. -type Preparer interface { - Prepare(query string) (*sql.Stmt, error) -} - -// determine if any of our extensions are unsafe -func isUnsafe(i interface{}) bool { - switch v := i.(type) { - case Row: - return v.unsafe - case *Row: - return v.unsafe - case Rows: - return v.unsafe - case *Rows: - return v.unsafe - case NamedStmt: - return v.Stmt.unsafe - case *NamedStmt: - return v.Stmt.unsafe - case Stmt: - return v.unsafe - case *Stmt: - return v.unsafe - case qStmt: - return v.unsafe - case *qStmt: - return v.unsafe - case DB: - return v.unsafe - case *DB: - return v.unsafe - case Tx: - return v.unsafe - case *Tx: - return v.unsafe - case sql.Rows, *sql.Rows: - return false - default: - return false - } -} - -func mapperFor(i interface{}) *reflectx.Mapper { - switch i.(type) { - case DB: - return i.(DB).Mapper - case *DB: - return i.(*DB).Mapper - case Tx: - return i.(Tx).Mapper - case *Tx: - return i.(*Tx).Mapper - default: - return mapper() - } -} - -var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() -var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() - -// Row is a reimplementation of sql.Row in order to gain access to the underlying -// sql.Rows.Columns() data, necessary for StructScan. -type Row struct { - err error - unsafe bool - rows *sql.Rows - Mapper *reflectx.Mapper -} - -// Scan is a fixed implementation of sql.Row.Scan, which does not discard the -// underlying error from the internal rows object if it exists. 
-func (r *Row) Scan(dest ...interface{}) error { - if r.err != nil { - return r.err - } - - // TODO(bradfitz): for now we need to defensively clone all - // []byte that the driver returned (not permitting - // *RawBytes in Rows.Scan), since we're about to close - // the Rows in our defer, when we return from this function. - // the contract with the driver.Next(...) interface is that it - // can return slices into read-only temporary memory that's - // only valid until the next Scan/Close. But the TODO is that - // for a lot of drivers, this copy will be unnecessary. We - // should provide an optional interface for drivers to - // implement to say, "don't worry, the []bytes that I return - // from Next will not be modified again." (for instance, if - // they were obtained from the network anyway) But for now we - // don't care. - defer r.rows.Close() - for _, dp := range dest { - if _, ok := dp.(*sql.RawBytes); ok { - return errors.New("sql: RawBytes isn't allowed on Row.Scan") - } - } - - if !r.rows.Next() { - if err := r.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := r.rows.Scan(dest...) - if err != nil { - return err - } - // Make sure the query can be processed to completion with no errors. - if err := r.rows.Close(); err != nil { - return err - } - return nil -} - -// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually -// returned by Row.Scan() -func (r *Row) Columns() ([]string, error) { - if r.err != nil { - return []string{}, r.err - } - return r.rows.Columns() -} - -// Err returns the error encountered while scanning. -func (r *Row) Err() error { - return r.err -} - -// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, -// used mostly to automatically bind named queries using the right bindvars. -type DB struct { - *sql.DB - driverName string - unsafe bool - Mapper *reflectx.Mapper -} - -// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The -// driverName of the original database is required for named query support. -func NewDb(db *sql.DB, driverName string) *DB { - return &DB{DB: db, driverName: driverName, Mapper: mapper()} -} - -// DriverName returns the driverName passed to the Open function for this DB. -func (db *DB) DriverName() string { - return db.driverName -} - -// Open is the same as sql.Open, but returns an *sqlx.DB instead. -func Open(driverName, dataSourceName string) (*DB, error) { - db, err := sql.Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err -} - -// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. -func MustOpen(driverName, dataSourceName string) *DB { - db, err := Open(driverName, dataSourceName) - if err != nil { - panic(err) - } - return db -} - -// MapperFunc sets a new mapper for this db using the default sqlx struct tag -// and the provided mapper function. -func (db *DB) MapperFunc(mf func(string) string) { - db.Mapper = reflectx.NewMapperFunc("db", mf) -} - -// Rebind transforms a query from QUESTION to the DB driver's bindvar type. -func (db *DB) Rebind(query string) string { - return Rebind(BindType(db.driverName), query) -} - -// Unsafe returns a version of DB which will silently succeed to scan when -// columns in the SQL result have no fields in the destination struct. -// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its -// safety behavior. 
-func (db *DB) Unsafe() *DB { - return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} -} - -// BindNamed binds a query using the DB driver's bindvar type. -func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) -} - -// NamedQuery using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { - return NamedQuery(db, query, arg) -} - -// NamedExec using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { - return NamedExec(db, query, arg) -} - -// Select using this DB. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { - return Select(db, dest, query, args...) -} - -// Get using this DB. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { - return Get(db, dest, query, args...) -} - -// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead -// of an *sql.Tx. -func (db *DB) MustBegin() *Tx { - tx, err := db.Beginx() - if err != nil { - panic(err) - } - return tx -} - -// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. -func (db *DB) Beginx() (*Tx, error) { - tx, err := db.DB.Begin() - if err != nil { - return nil, err - } - return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// Queryx queries the database and returns an *sqlx.Rows. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { - r, err := db.DB.Query(query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// QueryRowx queries the database and returns an *sqlx.Row. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) QueryRowx(query string, args ...interface{}) *Row { - rows, err := db.DB.Query(query, args...) - return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} -} - -// MustExec (panic) runs MustExec using this database. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) MustExec(query string, args ...interface{}) sql.Result { - return MustExec(db, query, args...) -} - -// Preparex returns an sqlx.Stmt instead of a sql.Stmt -func (db *DB) Preparex(query string) (*Stmt, error) { - return Preparex(db, query) -} - -// PrepareNamed returns an sqlx.NamedStmt -func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { - return prepareNamed(db, query) -} - -// Tx is an sqlx wrapper around sql.Tx with extra functionality -type Tx struct { - *sql.Tx - driverName string - unsafe bool - Mapper *reflectx.Mapper -} - -// DriverName returns the driverName used by the DB which began this transaction. -func (tx *Tx) DriverName() string { - return tx.driverName -} - -// Rebind a query within a transaction's bindvar type. 
-func (tx *Tx) Rebind(query string) string { - return Rebind(BindType(tx.driverName), query) -} - -// Unsafe returns a version of Tx which will silently succeed to scan when -// columns in the SQL result have no fields in the destination struct. -func (tx *Tx) Unsafe() *Tx { - return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} -} - -// BindNamed binds a query within a transaction's bindvar type. -func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) -} - -// NamedQuery within a transaction. -// Any named placeholder parameters are replaced with fields from arg. -func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { - return NamedQuery(tx, query, arg) -} - -// NamedExec a named query within a transaction. -// Any named placeholder parameters are replaced with fields from arg. -func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { - return NamedExec(tx, query, arg) -} - -// Select within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { - return Select(tx, dest, query, args...) -} - -// Queryx within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { - r, err := tx.Tx.Query(query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err -} - -// QueryRowx within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { - rows, err := tx.Tx.Query(query, args...) - return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} -} - -// Get within a transaction. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { - return Get(tx, dest, query, args...) -} - -// MustExec runs MustExec within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { - return MustExec(tx, query, args...) -} - -// Preparex a statement within a transaction. -func (tx *Tx) Preparex(query string) (*Stmt, error) { - return Preparex(tx, query) -} - -// Stmtx returns a version of the prepared statement which runs within a transaction. Provided -// stmt can be either *sql.Stmt or *sqlx.Stmt. -func (tx *Tx) Stmtx(stmt interface{}) *Stmt { - var s *sql.Stmt - switch v := stmt.(type) { - case Stmt: - s = v.Stmt - case *Stmt: - s = v.Stmt - case sql.Stmt: - s = &v - case *sql.Stmt: - s = v - default: - panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) - } - return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} -} - -// NamedStmt returns a version of the prepared statement which runs within a transaction. 
-func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { - return &NamedStmt{ - QueryString: stmt.QueryString, - Params: stmt.Params, - Stmt: tx.Stmtx(stmt.Stmt), - } -} - -// PrepareNamed returns an sqlx.NamedStmt -func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { - return prepareNamed(tx, query) -} - -// Stmt is an sqlx wrapper around sql.Stmt with extra functionality -type Stmt struct { - *sql.Stmt - unsafe bool - Mapper *reflectx.Mapper -} - -// Unsafe returns a version of Stmt which will silently succeed to scan when -// columns in the SQL result have no fields in the destination struct. -func (s *Stmt) Unsafe() *Stmt { - return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} -} - -// Select using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) Select(dest interface{}, args ...interface{}) error { - return Select(&qStmt{s}, dest, "", args...) -} - -// Get using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (s *Stmt) Get(dest interface{}, args ...interface{}) error { - return Get(&qStmt{s}, dest, "", args...) -} - -// MustExec (panic) using this statement. Note that the query portion of the error -// output will be blank, as Stmt does not expose its query. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) MustExec(args ...interface{}) sql.Result { - return MustExec(&qStmt{s}, "", args...) -} - -// QueryRowx using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) QueryRowx(args ...interface{}) *Row { - qs := &qStmt{s} - return qs.QueryRowx("", args...) -} - -// Queryx using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { - qs := &qStmt{s} - return qs.Queryx("", args...) -} - -// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by -// implementing those interfaces and ignoring the `query` argument. -type qStmt struct{ *Stmt } - -func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { - return q.Stmt.Query(args...) -} - -func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { - r, err := q.Stmt.Query(args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err -} - -func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { - rows, err := q.Stmt.Query(args...) - return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} -} - -func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { - return q.Stmt.Exec(args...) -} - -// Rows is a wrapper around sql.Rows which caches costly reflect operations -// during a looped StructScan -type Rows struct { - *sql.Rows - unsafe bool - Mapper *reflectx.Mapper - // these fields cache memory use for a rows during iteration w/ structScan - started bool - fields [][]int - values []interface{} -} - -// SliceScan using this Rows. -func (r *Rows) SliceScan() ([]interface{}, error) { - return SliceScan(r) -} - -// MapScan using this Rows. -func (r *Rows) MapScan(dest map[string]interface{}) error { - return MapScan(r, dest) -} - -// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. -// Use this and iterate over Rows manually when the memory load of Select() might be -// prohibitive. 
*Rows.StructScan caches the reflect work of matching up column -// positions to fields to avoid that overhead per scan, which means it is not safe -// to run StructScan on the same Rows instance with different struct types. -func (r *Rows) StructScan(dest interface{}) error { - v := reflect.ValueOf(dest) - - if v.Kind() != reflect.Ptr { - return errors.New("must pass a pointer, not a value, to StructScan destination") - } - - v = reflect.Indirect(v) - - if !r.started { - columns, err := r.Columns() - if err != nil { - return err - } - m := r.Mapper - - r.fields = m.TraversalsByName(v.Type(), columns) - // if we are not unsafe and are missing fields, return an error - if f, err := missingFields(r.fields); err != nil && !r.unsafe { - return fmt.Errorf("missing destination name %s in %T", columns[f], dest) - } - r.values = make([]interface{}, len(columns)) - r.started = true - } - - err := fieldsByTraversal(v, r.fields, r.values, true) - if err != nil { - return err - } - // scan into the struct field pointers and append to our results - err = r.Scan(r.values...) - if err != nil { - return err - } - return r.Err() -} - -// Connect to a database and verify with a ping. -func Connect(driverName, dataSourceName string) (*DB, error) { - db, err := Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - err = db.Ping() - if err != nil { - db.Close() - return nil, err - } - return db, nil -} - -// MustConnect connects to a database and panics on error. -func MustConnect(driverName, dataSourceName string) *DB { - db, err := Connect(driverName, dataSourceName) - if err != nil { - panic(err) - } - return db -} - -// Preparex prepares a statement. -func Preparex(p Preparer, query string) (*Stmt, error) { - s, err := p.Prepare(query) - if err != nil { - return nil, err - } - return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err -} - -// Select executes a query using the provided Queryer, and StructScans each row -// into dest, which must be a slice. If the slice elements are scannable, then -// the result set must have only one column. Otherwise, StructScan is used. -// The *sql.Rows are closed automatically. -// Any placeholder parameters are replaced with supplied args. -func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { - rows, err := q.Queryx(query, args...) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// Get does a QueryRow using the provided Queryer, and scans the resulting row -// to dest. If dest is scannable, the result must only have one column. Otherwise, -// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { - r := q.QueryRowx(query, args...) - return r.scanAny(dest, false) -} - -// LoadFile exec's every statement in a file (as a single call to Exec). -// LoadFile may return a nil *sql.Result if errors are encountered locating or -// reading the file at path. LoadFile reads the entire file into memory, so it -// is not suitable for loading large data dumps, but can be useful for initializing -// schemas or loading indexes. -// -// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 -// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. 
Detecting -// this by requiring something with DriverName() and then attempting to split the -// queries will be difficult to get right, and its current driver-specific behavior -// is deemed at least not complex in its incorrectness. -func LoadFile(e Execer, path string) (*sql.Result, error) { - realpath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - contents, err := ioutil.ReadFile(realpath) - if err != nil { - return nil, err - } - res, err := e.Exec(string(contents)) - return &res, err -} - -// MustExec execs the query using e and panics if there was an error. -// Any placeholder parameters are replaced with supplied args. -func MustExec(e Execer, query string, args ...interface{}) sql.Result { - res, err := e.Exec(query, args...) - if err != nil { - panic(err) - } - return res -} - -// SliceScan using this Rows. -func (r *Row) SliceScan() ([]interface{}, error) { - return SliceScan(r) -} - -// MapScan using this Rows. -func (r *Row) MapScan(dest map[string]interface{}) error { - return MapScan(r, dest) -} - -func (r *Row) scanAny(dest interface{}, structOnly bool) error { - if r.err != nil { - return r.err - } - if r.rows == nil { - r.err = sql.ErrNoRows - return r.err - } - defer r.rows.Close() - - v := reflect.ValueOf(dest) - if v.Kind() != reflect.Ptr { - return errors.New("must pass a pointer, not a value, to StructScan destination") - } - if v.IsNil() { - return errors.New("nil pointer passed to StructScan destination") - } - - base := reflectx.Deref(v.Type()) - scannable := isScannable(base) - - if structOnly && scannable { - return structOnlyError(base) - } - - columns, err := r.Columns() - if err != nil { - return err - } - - if scannable && len(columns) > 1 { - return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) - } - - if scannable { - return r.Scan(dest) - } - - m := r.Mapper - - fields := m.TraversalsByName(v.Type(), columns) - // if we are not unsafe and are missing fields, return an error - if f, err := missingFields(fields); err != nil && !r.unsafe { - return fmt.Errorf("missing destination name %s in %T", columns[f], dest) - } - values := make([]interface{}, len(columns)) - - err = fieldsByTraversal(v, fields, values, true) - if err != nil { - return err - } - // scan into the struct field pointers and append to our results - return r.Scan(values...) -} - -// StructScan a single Row into dest. -func (r *Row) StructScan(dest interface{}) error { - return r.scanAny(dest, true) -} - -// SliceScan a row, returning a []interface{} with values similar to MapScan. -// This function is primarily intended for use where the number of columns -// is not known. Because you can pass an []interface{} directly to Scan, -// it's recommended that you do that as it will not have to allocate new -// slices per row. -func SliceScan(r ColScanner) ([]interface{}, error) { - // ignore r.started, since we needn't use reflect for anything. - columns, err := r.Columns() - if err != nil { - return []interface{}{}, err - } - - values := make([]interface{}, len(columns)) - for i := range values { - values[i] = new(interface{}) - } - - err = r.Scan(values...) - - if err != nil { - return values, err - } - - for i := range columns { - values[i] = *(values[i].(*interface{})) - } - - return values, r.Err() -} - -// MapScan scans a single Row into the dest map[string]interface{}. 
-// Use this to get results for SQL that might not be under your control -// (for instance, if you're building an interface for an SQL server that -// executes SQL from input). Please do not use this as a primary interface! -// This will modify the map sent to it in place, so reuse the same map with -// care. Columns which occur more than once in the result will overwrite -// each other! -func MapScan(r ColScanner, dest map[string]interface{}) error { - // ignore r.started, since we needn't use reflect for anything. - columns, err := r.Columns() - if err != nil { - return err - } - - values := make([]interface{}, len(columns)) - for i := range values { - values[i] = new(interface{}) - } - - err = r.Scan(values...) - if err != nil { - return err - } - - for i, column := range columns { - dest[column] = *(values[i].(*interface{})) - } - - return r.Err() -} - -type rowsi interface { - Close() error - Columns() ([]string, error) - Err() error - Next() bool - Scan(...interface{}) error -} - -// structOnlyError returns an error appropriate for type when a non-scannable -// struct is expected but something else is given -func structOnlyError(t reflect.Type) error { - isStruct := t.Kind() == reflect.Struct - isScanner := reflect.PtrTo(t).Implements(_scannerInterface) - if !isStruct { - return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) - } - if isScanner { - return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) - } - return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) -} - -// scanAll scans all rows into a destination, which must be a slice of any -// type. If the destination slice type is a Struct, then StructScan will be -// used on each row. If the destination is some other kind of base type, then -// each row must only have one column which can scan into that type. This -// allows you to do something like: -// -// rows, _ := db.Query("select id from people;") -// var ids []int -// scanAll(rows, &ids, false) -// -// and ids will be a list of the id results. I realize that this is a desirable -// interface to expose to users, but for now it will only be exposed via changes -// to `Get` and `Select`. The reason that this has been implemented like this is -// this is the only way to not duplicate reflect work in the new API while -// maintaining backwards compatibility. 
-func scanAll(rows rowsi, dest interface{}, structOnly bool) error { - var v, vp reflect.Value - - value := reflect.ValueOf(dest) - - // json.Unmarshal returns errors for these - if value.Kind() != reflect.Ptr { - return errors.New("must pass a pointer, not a value, to StructScan destination") - } - if value.IsNil() { - return errors.New("nil pointer passed to StructScan destination") - } - direct := reflect.Indirect(value) - - slice, err := baseType(value.Type(), reflect.Slice) - if err != nil { - return err - } - - isPtr := slice.Elem().Kind() == reflect.Ptr - base := reflectx.Deref(slice.Elem()) - scannable := isScannable(base) - - if structOnly && scannable { - return structOnlyError(base) - } - - columns, err := rows.Columns() - if err != nil { - return err - } - - // if it's a base type make sure it only has 1 column; if not return an error - if scannable && len(columns) > 1 { - return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) - } - - if !scannable { - var values []interface{} - var m *reflectx.Mapper - - switch rows.(type) { - case *Rows: - m = rows.(*Rows).Mapper - default: - m = mapper() - } - - fields := m.TraversalsByName(base, columns) - // if we are not unsafe and are missing fields, return an error - if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { - return fmt.Errorf("missing destination name %s in %T", columns[f], dest) - } - values = make([]interface{}, len(columns)) - - for rows.Next() { - // create a new struct type (which returns PtrTo) and indirect it - vp = reflect.New(base) - v = reflect.Indirect(vp) - - err = fieldsByTraversal(v, fields, values, true) - if err != nil { - return err - } - - // scan into the struct field pointers and append to our results - err = rows.Scan(values...) - if err != nil { - return err - } - - if isPtr { - direct.Set(reflect.Append(direct, vp)) - } else { - direct.Set(reflect.Append(direct, v)) - } - } - } else { - for rows.Next() { - vp = reflect.New(base) - err = rows.Scan(vp.Interface()) - if err != nil { - return err - } - // append - if isPtr { - direct.Set(reflect.Append(direct, vp)) - } else { - direct.Set(reflect.Append(direct, reflect.Indirect(vp))) - } - } - } - - return rows.Err() -} - -// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately -// it doesn't really feel like it's named properly. There is an incongruency -// between this and the way that StructScan (which might better be ScanStruct -// anyway) works on a rows object. - -// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. -// StructScan will scan in the entire rows result, so if you do not want to -// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. -// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. -func StructScan(rows rowsi, dest interface{}) error { - return scanAll(rows, dest, true) - -} - -// reflect helpers - -func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { - t = reflectx.Deref(t) - if t.Kind() != expected { - return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) - } - return t, nil -} - -// fieldsByName fills a values interface with fields from the passed value based -// on the traversals in int. If ptrs is true, return addresses instead of values. -// We write this instead of using FieldsByName to save allocations and map lookups -// when iterating over many rows. Empty traversals will get an interface pointer. 
-// Because of the necessity of requesting ptrs or values, it's considered a bit too -// specialized for inclusion in reflectx itself. -func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { - v = reflect.Indirect(v) - if v.Kind() != reflect.Struct { - return errors.New("argument not a struct") - } - - for i, traversal := range traversals { - if len(traversal) == 0 { - values[i] = new(interface{}) - continue - } - f := reflectx.FieldByIndexes(v, traversal) - if ptrs { - values[i] = f.Addr().Interface() - } else { - values[i] = f.Interface() - } - } - return nil -} - -func missingFields(transversals [][]int) (field int, err error) { - for i, t := range transversals { - if len(t) == 0 { - return i, errors.New("missing field") - } - } - return 0, nil -} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go deleted file mode 100644 index 0b17145..0000000 --- a/vendor/github.com/jmoiron/sqlx/sqlx_context.go +++ /dev/null @@ -1,335 +0,0 @@ -// +build go1.8 - -package sqlx - -import ( - "context" - "database/sql" - "fmt" - "io/ioutil" - "path/filepath" - "reflect" -) - -// ConnectContext to a database and verify with a ping. -func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { - db, err := Open(driverName, dataSourceName) - if err != nil { - return db, err - } - err = db.PingContext(ctx) - return db, err -} - -// QueryerContext is an interface used by GetContext and SelectContext -type QueryerContext interface { - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) - QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) - QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row -} - -// PreparerContext is an interface used by PreparexContext. -type PreparerContext interface { - PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) -} - -// ExecerContext is an interface used by MustExecContext and LoadFileContext -type ExecerContext interface { - ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) -} - -// ExtContext is a union interface which can bind, query, and exec, with Context -// used by NamedQueryContext and NamedExecContext. -type ExtContext interface { - binder - QueryerContext - ExecerContext -} - -// SelectContext executes a query using the provided Queryer, and StructScans -// each row into dest, which must be a slice. If the slice elements are -// scannable, then the result set must have only one column. Otherwise, -// StructScan is used. The *sql.Rows are closed automatically. -// Any placeholder parameters are replaced with supplied args. -func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { - rows, err := q.QueryxContext(ctx, query, args...) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// PreparexContext prepares a statement. -// -// The provided context is used for the preparation of the statement, not for -// the execution of the statement. 
-func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { - s, err := p.PrepareContext(ctx, query) - if err != nil { - return nil, err - } - return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err -} - -// GetContext does a QueryRow using the provided Queryer, and scans the -// resulting row to dest. If dest is scannable, the result must only have one -// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like -// row.Scan would. Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { - r := q.QueryRowxContext(ctx, query, args...) - return r.scanAny(dest, false) -} - -// LoadFileContext exec's every statement in a file (as a single call to Exec). -// LoadFileContext may return a nil *sql.Result if errors are encountered -// locating or reading the file at path. LoadFile reads the entire file into -// memory, so it is not suitable for loading large data dumps, but can be useful -// for initializing schemas or loading indexes. -// -// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 -// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting -// this by requiring something with DriverName() and then attempting to split the -// queries will be difficult to get right, and its current driver-specific behavior -// is deemed at least not complex in its incorrectness. -func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { - realpath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - contents, err := ioutil.ReadFile(realpath) - if err != nil { - return nil, err - } - res, err := e.ExecContext(ctx, string(contents)) - return &res, err -} - -// MustExecContext execs the query using e and panics if there was an error. -// Any placeholder parameters are replaced with supplied args. -func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { - res, err := e.ExecContext(ctx, query, args...) - if err != nil { - panic(err) - } - return res -} - -// PrepareNamedContext returns an sqlx.NamedStmt -func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { - return prepareNamedContext(ctx, db, query) -} - -// NamedQueryContext using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { - return NamedQueryContext(ctx, db, query, arg) -} - -// NamedExecContext using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { - return NamedExecContext(ctx, db, query, arg) -} - -// SelectContext using this DB. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return SelectContext(ctx, db, dest, query, args...) -} - -// GetContext using this DB. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. 
-func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return GetContext(ctx, db, dest, query, args...) -} - -// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. -// -// The provided context is used for the preparation of the statement, not for -// the execution of the statement. -func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { - return PreparexContext(ctx, db, query) -} - -// QueryxContext queries the database and returns an *sqlx.Rows. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - r, err := db.DB.QueryContext(ctx, query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// QueryRowxContext queries the database and returns an *sqlx.Row. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := db.DB.QueryContext(ctx, query, args...) - return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} -} - -// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead -// of an *sql.Tx. -// -// The provided context is used until the transaction is committed or rolled -// back. If the context is canceled, the sql package will roll back the -// transaction. Tx.Commit will return an error if the context provided to -// MustBeginContext is canceled. -func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { - tx, err := db.BeginTxx(ctx, opts) - if err != nil { - panic(err) - } - return tx -} - -// MustExecContext (panic) runs MustExec using this database. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { - return MustExecContext(ctx, db, query, args...) -} - -// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an -// *sql.Tx. -// -// The provided context is used until the transaction is committed or rolled -// back. If the context is canceled, the sql package will roll back the -// transaction. Tx.Commit will return an error if the context provided to -// BeginxContext is canceled. -func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { - tx, err := db.DB.BeginTx(ctx, opts) - if err != nil { - return nil, err - } - return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// StmtxContext returns a version of the prepared statement which runs within a -// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. -func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { - var s *sql.Stmt - switch v := stmt.(type) { - case Stmt: - s = v.Stmt - case *Stmt: - s = v.Stmt - case sql.Stmt: - s = &v - case *sql.Stmt: - s = v - default: - panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) - } - return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} -} - -// NamedStmtContext returns a version of the prepared statement which runs -// within a transaction. 
-func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { - return &NamedStmt{ - QueryString: stmt.QueryString, - Params: stmt.Params, - Stmt: tx.StmtxContext(ctx, stmt.Stmt), - } -} - -// MustExecContext runs MustExecContext within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { - return MustExecContext(ctx, tx, query, args...) -} - -// QueryxContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - r, err := tx.Tx.QueryContext(ctx, query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err -} - -// SelectContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return SelectContext(ctx, tx, dest, query, args...) -} - -// GetContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return GetContext(ctx, tx, dest, query, args...) -} - -// QueryRowxContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := tx.Tx.QueryContext(ctx, query, args...) - return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} -} - -// NamedExecContext using this Tx. -// Any named placeholder parameters are replaced with fields from arg. -func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { - return NamedExecContext(ctx, tx, query, arg) -} - -// SelectContext using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { - return SelectContext(ctx, &qStmt{s}, dest, "", args...) -} - -// GetContext using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { - return GetContext(ctx, &qStmt{s}, dest, "", args...) -} - -// MustExecContext (panic) using this statement. Note that the query portion of -// the error output will be blank, as Stmt does not expose its query. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { - return MustExecContext(ctx, &qStmt{s}, "", args...) -} - -// QueryRowxContext using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { - qs := &qStmt{s} - return qs.QueryRowxContext(ctx, "", args...) -} - -// QueryxContext using this statement. -// Any placeholder parameters are replaced with supplied args. 
-func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { - qs := &qStmt{s} - return qs.QueryxContext(ctx, "", args...) -} - -func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { - return q.Stmt.QueryContext(ctx, args...) -} - -func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - r, err := q.Stmt.QueryContext(ctx, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err -} - -func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := q.Stmt.QueryContext(ctx, args...) - return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} -} - -func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { - return q.Stmt.ExecContext(ctx, args...) -} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go deleted file mode 100644 index 85e112b..0000000 --- a/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go +++ /dev/null @@ -1,1344 +0,0 @@ -// +build go1.8 - -// The following environment variables, if set, will be used: -// -// * SQLX_SQLITE_DSN -// * SQLX_POSTGRES_DSN -// * SQLX_MYSQL_DSN -// -// Set any of these variables to 'skip' to skip them. Note that for MySQL, -// the string '?parseTime=True' will be appended to the DSN if it's not there -// already. -// -package sqlx - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - "log" - "strings" - "testing" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/jmoiron/sqlx/reflectx" - _ "github.com/lib/pq" - _ "github.com/mattn/go-sqlite3" -) - -func MultiExecContext(ctx context.Context, e ExecerContext, query string) { - stmts := strings.Split(query, ";\n") - if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 { - stmts = stmts[:len(stmts)-1] - } - for _, s := range stmts { - _, err := e.ExecContext(ctx, s) - if err != nil { - fmt.Println(err, s) - } - } -} - -func RunWithSchemaContext(ctx context.Context, schema Schema, t *testing.T, test func(ctx context.Context, db *DB, t *testing.T)) { - runner := func(ctx context.Context, db *DB, t *testing.T, create, drop string) { - defer func() { - MultiExecContext(ctx, db, drop) - }() - - MultiExecContext(ctx, db, create) - test(ctx, db, t) - } - - if TestPostgres { - create, drop := schema.Postgres() - runner(ctx, pgdb, t, create, drop) - } - if TestSqlite { - create, drop := schema.Sqlite3() - runner(ctx, sldb, t, create, drop) - } - if TestMysql { - create, drop := schema.MySQL() - runner(ctx, mysqldb, t, create, drop) - } -} - -func loadDefaultFixtureContext(ctx context.Context, db *DB, t *testing.T) { - tx := db.MustBeginTx(ctx, nil) - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net") - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net") - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") - if db.DriverName() == 
"mysql" { - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27") - } else { - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27") - } - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444") - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444") - tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444") - tx.Commit() -} - -// Test a new backwards compatible feature, that missing scan destinations -// will silently scan into sql.RawText rather than failing/panicing -func TestMissingNamesContextContext(t *testing.T) { - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - type PersonPlus struct { - FirstName string `db:"first_name"` - LastName string `db:"last_name"` - Email string - //AddedAt time.Time `db:"added_at"` - } - - // test Select first - pps := []PersonPlus{} - // pps lacks added_at destination - err := db.SelectContext(ctx, &pps, "SELECT * FROM person") - if err == nil { - t.Error("Expected missing name from Select to fail, but it did not.") - } - - // test Get - pp := PersonPlus{} - err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1") - if err == nil { - t.Error("Expected missing name Get to fail, but it did not.") - } - - // test naked StructScan - pps = []PersonPlus{} - rows, err := db.QueryContext(ctx, "SELECT * FROM person LIMIT 1") - if err != nil { - t.Fatal(err) - } - rows.Next() - err = StructScan(rows, &pps) - if err == nil { - t.Error("Expected missing name in StructScan to fail, but it did not.") - } - rows.Close() - - // now try various things with unsafe set. 
- db = db.Unsafe() - pps = []PersonPlus{} - err = db.SelectContext(ctx, &pps, "SELECT * FROM person") - if err != nil { - t.Error(err) - } - - // test Get - pp = PersonPlus{} - err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1") - if err != nil { - t.Error(err) - } - - // test naked StructScan - pps = []PersonPlus{} - rowsx, err := db.QueryxContext(ctx, "SELECT * FROM person LIMIT 1") - if err != nil { - t.Fatal(err) - } - rowsx.Next() - err = StructScan(rowsx, &pps) - if err != nil { - t.Error(err) - } - rowsx.Close() - - // test Named stmt - if !isUnsafe(db) { - t.Error("Expected db to be unsafe, but it isn't") - } - nstmt, err := db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`) - if err != nil { - t.Fatal(err) - } - // its internal stmt should be marked unsafe - if !nstmt.Stmt.unsafe { - t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety") - } - pps = []PersonPlus{} - err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"}) - if err != nil { - t.Fatal(err) - } - if len(pps) != 1 { - t.Errorf("Expected 1 person back, got %d", len(pps)) - } - - // test it with a safe db - db.unsafe = false - if isUnsafe(db) { - t.Error("expected db to be safe but it isn't") - } - nstmt, err = db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`) - if err != nil { - t.Fatal(err) - } - // it should be safe - if isUnsafe(nstmt) { - t.Error("NamedStmt did not inherit safety") - } - nstmt.Unsafe() - if !isUnsafe(nstmt) { - t.Error("expected newly unsafed NamedStmt to be unsafe") - } - pps = []PersonPlus{} - err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"}) - if err != nil { - t.Fatal(err) - } - if len(pps) != 1 { - t.Errorf("Expected 1 person back, got %d", len(pps)) - } - - }) -} - -func TestEmbeddedStructsContextContext(t *testing.T) { - type Loop1 struct{ Person } - type Loop2 struct{ Loop1 } - type Loop3 struct{ Loop2 } - - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - peopleAndPlaces := []PersonPlace{} - err := db.SelectContext( - ctx, - &peopleAndPlaces, - `SELECT person.*, place.* FROM - person natural join place`) - if err != nil { - t.Fatal(err) - } - for _, pp := range peopleAndPlaces { - if len(pp.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - if len(pp.Place.Country) == 0 { - t.Errorf("Expected non zero lengthed country.") - } - } - - // test embedded structs with StructScan - rows, err := db.QueryxContext( - ctx, - `SELECT person.*, place.* FROM - person natural join place`) - if err != nil { - t.Error(err) - } - - perp := PersonPlace{} - rows.Next() - err = rows.StructScan(&perp) - if err != nil { - t.Error(err) - } - - if len(perp.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - if len(perp.Place.Country) == 0 { - t.Errorf("Expected non zero lengthed country.") - } - - rows.Close() - - // test the same for embedded pointer structs - peopleAndPlacesPtrs := []PersonPlacePtr{} - err = db.SelectContext( - ctx, - &peopleAndPlacesPtrs, - `SELECT person.*, place.* FROM - person natural join place`) - if err != nil { - t.Fatal(err) - } - for _, pp := range peopleAndPlacesPtrs { - if len(pp.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - if len(pp.Place.Country) == 0 { - t.Errorf("Expected non zero lengthed country.") - } - } - - // test "deep 
nesting" - l3s := []Loop3{} - err = db.SelectContext(ctx, &l3s, `select * from person`) - if err != nil { - t.Fatal(err) - } - for _, l3 := range l3s { - if len(l3.Loop2.Loop1.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - } - - // test "embed conflicts" - ec := []EmbedConflict{} - err = db.SelectContext(ctx, &ec, `select * from person`) - // I'm torn between erroring here or having some kind of working behavior - // in order to allow for more flexibility in destination structs - if err != nil { - t.Errorf("Was not expecting an error on embed conflicts.") - } - }) -} - -func TestJoinQueryContext(t *testing.T) { - type Employee struct { - Name string - ID int64 - // BossID is an id into the employee table - BossID sql.NullInt64 `db:"boss_id"` - } - type Boss Employee - - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - - var employees []struct { - Employee - Boss `db:"boss"` - } - - err := db.SelectContext(ctx, - &employees, - `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees - JOIN employees AS boss ON employees.boss_id = boss.id`) - if err != nil { - t.Fatal(err) - } - - for _, em := range employees { - if len(em.Employee.Name) == 0 { - t.Errorf("Expected non zero lengthed name.") - } - if em.Employee.BossID.Int64 != em.Boss.ID { - t.Errorf("Expected boss ids to match") - } - } - }) -} - -func TestJoinQueryNamedPointerStructsContext(t *testing.T) { - type Employee struct { - Name string - ID int64 - // BossID is an id into the employee table - BossID sql.NullInt64 `db:"boss_id"` - } - type Boss Employee - - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - - var employees []struct { - Emp1 *Employee `db:"emp1"` - Emp2 *Employee `db:"emp2"` - *Boss `db:"boss"` - } - - err := db.SelectContext(ctx, - &employees, - `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id", - emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id", - boss.id "boss.id", boss.name "boss.name" FROM employees AS emp - JOIN employees AS boss ON emp.boss_id = boss.id - `) - if err != nil { - t.Fatal(err) - } - - for _, em := range employees { - if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 { - t.Errorf("Expected non zero lengthed name.") - } - if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID { - t.Errorf("Expected boss ids to match") - } - } - }) -} - -func TestSelectSliceMapTimeContext(t *testing.T) { - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - rows, err := db.QueryxContext(ctx, "SELECT * FROM person") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - _, err := rows.SliceScan() - if err != nil { - t.Error(err) - } - } - - rows, err = db.QueryxContext(ctx, "SELECT * FROM person") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - m := map[string]interface{}{} - err := rows.MapScan(m) - if err != nil { - t.Error(err) - } - } - - }) -} - -func TestNilReceiverContext(t *testing.T) { - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - var p *Person - err := db.GetContext(ctx, p, "SELECT * FROM person LIMIT 1") - if err == nil { - t.Error("Expected error when getting 
into nil struct ptr.") - } - var pp *[]Person - err = db.SelectContext(ctx, pp, "SELECT * FROM person") - if err == nil { - t.Error("Expected an error when selecting into nil slice ptr.") - } - }) -} - -func TestNamedQueryContext(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE place ( - id integer PRIMARY KEY, - name text NULL - ); - CREATE TABLE person ( - first_name text NULL, - last_name text NULL, - email text NULL - ); - CREATE TABLE placeperson ( - first_name text NULL, - last_name text NULL, - email text NULL, - place_id integer NULL - ); - CREATE TABLE jsperson ( - "FIRST" text NULL, - last_name text NULL, - "EMAIL" text NULL - );`, - drop: ` - drop table person; - drop table jsperson; - drop table place; - drop table placeperson; - `, - } - - RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { - type Person struct { - FirstName sql.NullString `db:"first_name"` - LastName sql.NullString `db:"last_name"` - Email sql.NullString - } - - p := Person{ - FirstName: sql.NullString{String: "ben", Valid: true}, - LastName: sql.NullString{String: "doe", Valid: true}, - Email: sql.NullString{String: "ben@doe.com", Valid: true}, - } - - q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)` - _, err := db.NamedExecContext(ctx, q1, p) - if err != nil { - log.Fatal(err) - } - - p2 := &Person{} - rows, err := db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", p) - if err != nil { - log.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(p2) - if err != nil { - t.Error(err) - } - if p2.FirstName.String != "ben" { - t.Error("Expected first name of `ben`, got " + p2.FirstName.String) - } - if p2.LastName.String != "doe" { - t.Error("Expected first name of `doe`, got " + p2.LastName.String) - } - } - - // these are tests for #73; they verify that named queries work if you've - // changed the db mapper. This code checks both NamedQuery "ad-hoc" style - // queries and NamedStmt queries, which use different code paths internally. - old := *db.Mapper - - type JSONPerson struct { - FirstName sql.NullString `json:"FIRST"` - LastName sql.NullString `json:"last_name"` - Email sql.NullString - } - - jp := JSONPerson{ - FirstName: sql.NullString{String: "ben", Valid: true}, - LastName: sql.NullString{String: "smith", Valid: true}, - Email: sql.NullString{String: "ben@smith.com", Valid: true}, - } - - db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper) - - // prepare queries for case sensitivity to test our ToUpper function. 
- // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line - // strings are `` we use "" by default and swap out for MySQL - pdb := func(s string, db *DB) string { - if db.DriverName() == "mysql" { - return strings.Replace(s, `"`, "`", -1) - } - return s - } - - q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)` - _, err = db.NamedExecContext(ctx, pdb(q1, db), jp) - if err != nil { - t.Fatal(err, db.DriverName()) - } - - // Checks that a person pulled out of the db matches the one we put in - check := func(t *testing.T, rows *Rows) { - jp = JSONPerson{} - for rows.Next() { - err = rows.StructScan(&jp) - if err != nil { - t.Error(err) - } - if jp.FirstName.String != "ben" { - t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName()) - } - if jp.LastName.String != "smith" { - t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName()) - } - if jp.Email.String != "ben@smith.com" { - t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName()) - } - } - } - - ns, err := db.PrepareNamed(pdb(` - SELECT * FROM jsperson - WHERE - "FIRST"=:FIRST AND - last_name=:last_name AND - "EMAIL"=:EMAIL - `, db)) - - if err != nil { - t.Fatal(err) - } - rows, err = ns.QueryxContext(ctx, jp) - if err != nil { - t.Fatal(err) - } - - check(t, rows) - - // Check exactly the same thing, but with db.NamedQuery, which does not go - // through the PrepareNamed/NamedStmt path. - rows, err = db.NamedQueryContext(ctx, pdb(` - SELECT * FROM jsperson - WHERE - "FIRST"=:FIRST AND - last_name=:last_name AND - "EMAIL"=:EMAIL - `, db), jp) - if err != nil { - t.Fatal(err) - } - - check(t, rows) - - db.Mapper = &old - - // Test nested structs - type Place struct { - ID int `db:"id"` - Name sql.NullString `db:"name"` - } - type PlacePerson struct { - FirstName sql.NullString `db:"first_name"` - LastName sql.NullString `db:"last_name"` - Email sql.NullString - Place Place `db:"place"` - } - - pl := Place{ - Name: sql.NullString{String: "myplace", Valid: true}, - } - - pp := PlacePerson{ - FirstName: sql.NullString{String: "ben", Valid: true}, - LastName: sql.NullString{String: "doe", Valid: true}, - Email: sql.NullString{String: "ben@doe.com", Valid: true}, - } - - q2 := `INSERT INTO place (id, name) VALUES (1, :name)` - _, err = db.NamedExecContext(ctx, q2, pl) - if err != nil { - log.Fatal(err) - } - - id := 1 - pp.Place.ID = id - - q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)` - _, err = db.NamedExecContext(ctx, q3, pp) - if err != nil { - log.Fatal(err) - } - - pp2 := &PlacePerson{} - rows, err = db.NamedQueryContext(ctx, ` - SELECT - first_name, - last_name, - email, - place.id AS "place.id", - place.name AS "place.name" - FROM placeperson - INNER JOIN place ON place.id = placeperson.place_id - WHERE - place.id=:place.id`, pp) - if err != nil { - log.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(pp2) - if err != nil { - t.Error(err) - } - if pp2.FirstName.String != "ben" { - t.Error("Expected first name of `ben`, got " + pp2.FirstName.String) - } - if pp2.LastName.String != "doe" { - t.Error("Expected first name of `doe`, got " + pp2.LastName.String) - } - if pp2.Place.Name.String != "myplace" { - t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String) - } - if pp2.Place.ID != pp.Place.ID { - t.Errorf("Expected place name of %v, got %v", pp.Place.ID, 
pp2.Place.ID) - } - } - }) -} - -func TestNilInsertsContext(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE tt ( - id integer, - value text NULL DEFAULT NULL - );`, - drop: "drop table tt;", - } - - RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { - type TT struct { - ID int - Value *string - } - var v, v2 TT - r := db.Rebind - - db.MustExecContext(ctx, r(`INSERT INTO tt (id) VALUES (1)`)) - db.GetContext(ctx, &v, r(`SELECT * FROM tt`)) - if v.ID != 1 { - t.Errorf("Expecting id of 1, got %v", v.ID) - } - if v.Value != nil { - t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) - } - - v.ID = 2 - // NOTE: this incidentally uncovered a bug which was that named queries with - // pointer destinations would not work if the passed value here was not addressable, - // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for - // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly - // function. This next line is important as it provides the only coverage for this. - db.NamedExecContext(ctx, `INSERT INTO tt (id, value) VALUES (:id, :value)`, v) - - db.GetContext(ctx, &v2, r(`SELECT * FROM tt WHERE id=2`)) - if v.ID != v2.ID { - t.Errorf("%v != %v", v.ID, v2.ID) - } - if v2.Value != nil { - t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) - } - }) -} - -func TestScanErrorContext(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE kv ( - k text, - v integer - );`, - drop: `drop table kv;`, - } - - RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { - type WrongTypes struct { - K int - V string - } - _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1) - if err != nil { - t.Error(err) - } - - rows, err := db.QueryxContext(ctx, "SELECT * FROM kv") - if err != nil { - t.Error(err) - } - for rows.Next() { - var wt WrongTypes - err := rows.StructScan(&wt) - if err == nil { - t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName()) - } - } - }) -} - -// FIXME: this function is kinda big but it slows things down to be constantly -// loading and reloading the schema.. 
- -func TestUsageContext(t *testing.T) { - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - slicemembers := []SliceMember{} - err := db.SelectContext(ctx, &slicemembers, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - t.Fatal(err) - } - - people := []Person{} - - err = db.SelectContext(ctx, &people, "SELECT * FROM person ORDER BY first_name ASC") - if err != nil { - t.Fatal(err) - } - - jason, john := people[0], people[1] - if jason.FirstName != "Jason" { - t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName) - } - if jason.LastName != "Moiron" { - t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName) - } - if jason.Email != "jmoiron@jmoiron.net" { - t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email) - } - if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" { - t.Errorf("John Doe's person record not what expected: Got %v\n", john) - } - - jason = Person{} - err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason") - - if err != nil { - t.Fatal(err) - } - if jason.FirstName != "Jason" { - t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName) - } - - err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar") - if err == nil { - t.Errorf("Expecting an error, got nil\n") - } - if err != sql.ErrNoRows { - t.Errorf("Expected sql.ErrNoRows, got %v\n", err) - } - - // The following tests check statement reuse, which was actually a problem - // due to copying being done when creating Stmt's which was eventually removed - stmt1, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Fatal(err) - } - jason = Person{} - - row := stmt1.QueryRowx("DoesNotExist") - row.Scan(&jason) - row = stmt1.QueryRowx("DoesNotExist") - row.Scan(&jason) - - err = stmt1.GetContext(ctx, &jason, "DoesNotExist User") - if err == nil { - t.Error("Expected an error") - } - err = stmt1.GetContext(ctx, &jason, "DoesNotExist User 2") - if err == nil { - t.Fatal(err) - } - - stmt2, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Fatal(err) - } - jason = Person{} - tx, err := db.Beginx() - if err != nil { - t.Fatal(err) - } - tstmt2 := tx.Stmtx(stmt2) - row2 := tstmt2.QueryRowx("Jason") - err = row2.StructScan(&jason) - if err != nil { - t.Error(err) - } - tx.Commit() - - places := []*Place{} - err = db.SelectContext(ctx, &places, "SELECT telcode FROM place ORDER BY telcode ASC") - if err != nil { - t.Fatal(err) - } - - usa, singsing, honkers := places[0], places[1], places[2] - - if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { - t.Errorf("Expected integer telcodes to work, got %#v", places) - } - - placesptr := []PlacePtr{} - err = db.SelectContext(ctx, &placesptr, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - t.Error(err) - } - //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2]) - - // if you have null fields and use SELECT *, you must use sql.Null* in your struct - // this test also verifies that you can use either a []Struct{} or a []*Struct{} - places2 := []Place{} - err = db.SelectContext(ctx, &places2, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - t.Fatal(err) - } - - usa, singsing, honkers = &places2[0], &places2[1], &places2[2] - - // 
this should return a type error that &p is not a pointer to a struct slice - p := Place{} - err = db.SelectContext(ctx, &p, "SELECT * FROM place ORDER BY telcode ASC") - if err == nil { - t.Errorf("Expected an error, argument to select should be a pointer to a struct slice") - } - - // this should be an error - pl := []Place{} - err = db.SelectContext(ctx, pl, "SELECT * FROM place ORDER BY telcode ASC") - if err == nil { - t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.") - } - - if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { - t.Errorf("Expected integer telcodes to work, got %#v", places) - } - - stmt, err := db.PreparexContext(ctx, db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC")) - if err != nil { - t.Error(err) - } - - places = []*Place{} - err = stmt.SelectContext(ctx, &places, 10) - if len(places) != 2 { - t.Error("Expected 2 places, got 0.") - } - if err != nil { - t.Fatal(err) - } - singsing, honkers = places[0], places[1] - if singsing.TelCode != 65 || honkers.TelCode != 852 { - t.Errorf("Expected the right telcodes, got %#v", places) - } - - rows, err := db.QueryxContext(ctx, "SELECT * FROM place") - if err != nil { - t.Fatal(err) - } - place := Place{} - for rows.Next() { - err = rows.StructScan(&place) - if err != nil { - t.Fatal(err) - } - } - - rows, err = db.QueryxContext(ctx, "SELECT * FROM place") - if err != nil { - t.Fatal(err) - } - m := map[string]interface{}{} - for rows.Next() { - err = rows.MapScan(m) - if err != nil { - t.Fatal(err) - } - _, ok := m["country"] - if !ok { - t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m) - } - } - - rows, err = db.QueryxContext(ctx, "SELECT * FROM place") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - s, err := rows.SliceScan() - if err != nil { - t.Error(err) - } - if len(s) != 3 { - t.Errorf("Expected 3 columns in result, got %d\n", len(s)) - } - } - - // test advanced querying - // test that NamedExec works with a map as well as a struct - _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{ - "first": "Bin", - "last": "Smuth", - "email": "bensmith@allblacks.nz", - }) - if err != nil { - t.Fatal(err) - } - - // ensure that if the named param happens right at the end it still works - // ensure that NamedQuery works with a map[string]interface{} - rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"}) - if err != nil { - t.Fatal(err) - } - - ben := &Person{} - for rows.Next() { - err = rows.StructScan(ben) - if err != nil { - t.Fatal(err) - } - if ben.FirstName != "Bin" { - t.Fatal("Expected first name of `Bin`, got " + ben.FirstName) - } - if ben.LastName != "Smuth" { - t.Fatal("Expected first name of `Smuth`, got " + ben.LastName) - } - } - - ben.FirstName = "Ben" - ben.LastName = "Smith" - ben.Email = "binsmuth@allblacks.nz" - - // Insert via a named query using the struct - _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben) - - if err != nil { - t.Fatal(err) - } - - rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", ben) - if err != nil { - t.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(ben) - if err != nil { - t.Fatal(err) - } - if ben.FirstName != "Ben" { - t.Fatal("Expected first 
name of `Ben`, got " + ben.FirstName) - } - if ben.LastName != "Smith" { - t.Fatal("Expected first name of `Smith`, got " + ben.LastName) - } - } - // ensure that Get does not panic on emppty result set - person := &Person{} - err = db.GetContext(ctx, person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist") - if err == nil { - t.Fatal("Should have got an error for Get on non-existant row.") - } - - // lets test prepared statements some more - - stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Fatal(err) - } - rows, err = stmt.QueryxContext(ctx, "Ben") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(ben) - if err != nil { - t.Fatal(err) - } - if ben.FirstName != "Ben" { - t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) - } - if ben.LastName != "Smith" { - t.Fatal("Expected first name of `Smith`, got " + ben.LastName) - } - } - - john = Person{} - stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Error(err) - } - err = stmt.GetContext(ctx, &john, "John") - if err != nil { - t.Error(err) - } - - // test name mapping - // THIS USED TO WORK BUT WILL NO LONGER WORK. - db.MapperFunc(strings.ToUpper) - rsa := CPlace{} - err = db.GetContext(ctx, &rsa, "SELECT * FROM capplace;") - if err != nil { - t.Error(err, "in db:", db.DriverName()) - } - db.MapperFunc(strings.ToLower) - - // create a copy and change the mapper, then verify the copy behaves - // differently from the original. - dbCopy := NewDb(db.DB, db.DriverName()) - dbCopy.MapperFunc(strings.ToUpper) - err = dbCopy.GetContext(ctx, &rsa, "SELECT * FROM capplace;") - if err != nil { - fmt.Println(db.DriverName()) - t.Error(err) - } - - err = db.GetContext(ctx, &rsa, "SELECT * FROM cappplace;") - if err == nil { - t.Error("Expected no error, got ", err) - } - - // test base type slices - var sdest []string - rows, err = db.QueryxContext(ctx, "SELECT email FROM person ORDER BY email ASC;") - if err != nil { - t.Error(err) - } - err = scanAll(rows, &sdest, false) - if err != nil { - t.Error(err) - } - - // test Get with base types - var count int - err = db.GetContext(ctx, &count, "SELECT count(*) FROM person;") - if err != nil { - t.Error(err) - } - if count != len(sdest) { - t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest)) - } - - // test Get and Select with time.Time, #84 - var addedAt time.Time - err = db.GetContext(ctx, &addedAt, "SELECT added_at FROM person LIMIT 1;") - if err != nil { - t.Error(err) - } - - var addedAts []time.Time - err = db.SelectContext(ctx, &addedAts, "SELECT added_at FROM person;") - if err != nil { - t.Error(err) - } - - // test it on a double pointer - var pcount *int - err = db.GetContext(ctx, &pcount, "SELECT count(*) FROM person;") - if err != nil { - t.Error(err) - } - if *pcount != count { - t.Errorf("expected %d = %d", *pcount, count) - } - - // test Select... 
- sdest = []string{} - err = db.SelectContext(ctx, &sdest, "SELECT first_name FROM person ORDER BY first_name ASC;") - if err != nil { - t.Error(err) - } - expected := []string{"Ben", "Bin", "Jason", "John"} - for i, got := range sdest { - if got != expected[i] { - t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got) - } - } - - var nsdest []sql.NullString - err = db.SelectContext(ctx, &nsdest, "SELECT city FROM place ORDER BY city ASC") - if err != nil { - t.Error(err) - } - for _, val := range nsdest { - if val.Valid && val.String != "New York" { - t.Errorf("expected single valid result to be `New York`, but got %s", val.String) - } - } - }) -} - -// tests that sqlx will not panic when the wrong driver is passed because -// of an automatic nil dereference in sqlx.Open(), which was fixed. -func TestDoNotPanicOnConnectContext(t *testing.T) { - _, err := ConnectContext(context.Background(), "bogus", "hehe") - if err == nil { - t.Errorf("Should return error when using bogus driverName") - } -} - -func TestEmbeddedMapsContext(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE message ( - string text, - properties text - );`, - drop: `drop table message;`, - } - - RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { - messages := []Message{ - {"Hello, World", PropertyMap{"one": "1", "two": "2"}}, - {"Thanks, Joy", PropertyMap{"pull": "request"}}, - } - q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);` - for _, m := range messages { - _, err := db.NamedExecContext(ctx, q1, m) - if err != nil { - t.Fatal(err) - } - } - var count int - err := db.GetContext(ctx, &count, "SELECT count(*) FROM message") - if err != nil { - t.Fatal(err) - } - if count != len(messages) { - t.Fatalf("Expected %d messages in DB, found %d", len(messages), count) - } - - var m Message - err = db.GetContext(ctx, &m, "SELECT * FROM message LIMIT 1;") - if err != nil { - t.Fatal(err) - } - if m.Properties == nil { - t.Fatal("Expected m.Properties to not be nil, but it was.") - } - }) -} - -func TestIssue197Context(t *testing.T) { - // this test actually tests for a bug in database/sql: - // https://github.com/golang/go/issues/13905 - // this potentially makes _any_ named type that is an alias for []byte - // unsafe to use in a lot of different ways (basically, unsafe to hold - // onto after loading from the database). 
- t.Skip() - - type mybyte []byte - type Var struct{ Raw json.RawMessage } - type Var2 struct{ Raw []byte } - type Var3 struct{ Raw mybyte } - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - var err error - var v, q Var - if err = db.GetContext(ctx, &v, `SELECT '{"a": "b"}' AS raw`); err != nil { - t.Fatal(err) - } - if err = db.GetContext(ctx, &q, `SELECT 'null' AS raw`); err != nil { - t.Fatal(err) - } - - var v2, q2 Var2 - if err = db.GetContext(ctx, &v2, `SELECT '{"a": "b"}' AS raw`); err != nil { - t.Fatal(err) - } - if err = db.GetContext(ctx, &q2, `SELECT 'null' AS raw`); err != nil { - t.Fatal(err) - } - - var v3, q3 Var3 - if err = db.QueryRowContext(ctx, `SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil { - t.Fatal(err) - } - if err = db.QueryRowContext(ctx, `SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil { - t.Fatal(err) - } - t.Fail() - }) -} - -func TestInContext(t *testing.T) { - // some quite normal situations - type tr struct { - q string - args []interface{} - c int - } - tests := []tr{ - {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?", - []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}, - 7}, - {"SELECT * FROM foo WHERE x in (?)", - []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}}, - 8}, - } - for _, test := range tests { - q, a, err := In(test.q, test.args...) - if err != nil { - t.Error(err) - } - if len(a) != test.c { - t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a) - } - if strings.Count(q, "?") != test.c { - t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?")) - } - } - - // too many bindVars, but no slices, so short circuits parsing - // i'm not sure if this is the right behavior; this query/arg combo - // might not work, but we shouldn't parse if we don't need to - { - orig := "SELECT * FROM foo WHERE x = ? AND y = ?" - q, a, err := In(orig, "foo", "bar", "baz") - if err != nil { - t.Error(err) - } - if len(a) != 3 { - t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a) - } - if q != orig { - t.Error("Expected unchanged query.") - } - } - - tests = []tr{ - // too many bindvars; slice present so should return error during parse - {"SELECT * FROM foo WHERE x = ? and y = ?", - []interface{}{"foo", []int{1, 2, 3}, "bar"}, - 0}, - // empty slice, should return error before parse - {"SELECT * FROM foo WHERE x = ?", - []interface{}{[]int{}}, - 0}, - // too *few* bindvars, should return an error - {"SELECT * FROM foo WHERE x = ? AND y in (?)", - []interface{}{[]int{1, 2, 3}}, - 0}, - } - for _, test := range tests { - _, _, err := In(test.q, test.args...) - if err == nil { - t.Error("Expected an error, but got nil.") - } - } - RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { - loadDefaultFixtureContext(ctx, db, t) - //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") - //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") - //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") - telcodes := []int{852, 65} - q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode" - query, args, err := In(q, telcodes) - if err != nil { - t.Error(err) - } - query = db.Rebind(query) - places := []Place{} - err = db.SelectContext(ctx, &places, query, args...) 
- if err != nil { - t.Error(err) - } - if len(places) != 2 { - t.Fatalf("Expecting 2 results, got %d", len(places)) - } - if places[0].TelCode != 65 { - t.Errorf("Expecting singapore first, but got %#v", places[0]) - } - if places[1].TelCode != 852 { - t.Errorf("Expecting hong kong second, but got %#v", places[1]) - } - }) -} - -func TestEmbeddedLiteralsContext(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE x ( - k text - );`, - drop: `drop table x;`, - } - - RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { - type t1 struct { - K *string - } - type t2 struct { - Inline struct { - F string - } - K *string - } - - db.MustExecContext(ctx, db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three") - - target := t1{} - err := db.GetContext(ctx, &target, db.Rebind("SELECT * FROM x WHERE k=?"), "one") - if err != nil { - t.Error(err) - } - if *target.K != "one" { - t.Error("Expected target.K to be `one`, got ", target.K) - } - - target2 := t2{} - err = db.GetContext(ctx, &target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one") - if err != nil { - t.Error(err) - } - if *target2.K != "one" { - t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K) - } - }) -} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_test.go deleted file mode 100644 index 9502c13..0000000 --- a/vendor/github.com/jmoiron/sqlx/sqlx_test.go +++ /dev/null @@ -1,1795 +0,0 @@ -// The following environment variables, if set, will be used: -// -// * SQLX_SQLITE_DSN -// * SQLX_POSTGRES_DSN -// * SQLX_MYSQL_DSN -// -// Set any of these variables to 'skip' to skip them. Note that for MySQL, -// the string '?parseTime=True' will be appended to the DSN if it's not there -// already. 
-// -package sqlx - -import ( - "database/sql" - "database/sql/driver" - "encoding/json" - "fmt" - "log" - "os" - "reflect" - "strings" - "testing" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/jmoiron/sqlx/reflectx" - _ "github.com/lib/pq" - _ "github.com/mattn/go-sqlite3" -) - -/* compile time checks that Db, Tx, Stmt (qStmt) implement expected interfaces */ -var _, _ Ext = &DB{}, &Tx{} -var _, _ ColScanner = &Row{}, &Rows{} -var _ Queryer = &qStmt{} -var _ Execer = &qStmt{} - -var TestPostgres = true -var TestSqlite = true -var TestMysql = true - -var sldb *DB -var pgdb *DB -var mysqldb *DB -var active = []*DB{} - -func init() { - ConnectAll() -} - -func ConnectAll() { - var err error - - pgdsn := os.Getenv("SQLX_POSTGRES_DSN") - mydsn := os.Getenv("SQLX_MYSQL_DSN") - sqdsn := os.Getenv("SQLX_SQLITE_DSN") - - TestPostgres = pgdsn != "skip" - TestMysql = mydsn != "skip" - TestSqlite = sqdsn != "skip" - - if !strings.Contains(mydsn, "parseTime=true") { - mydsn += "?parseTime=true" - } - - if TestPostgres { - pgdb, err = Connect("postgres", pgdsn) - if err != nil { - fmt.Printf("Disabling PG tests:\n %v\n", err) - TestPostgres = false - } - } else { - fmt.Println("Disabling Postgres tests.") - } - - if TestMysql { - mysqldb, err = Connect("mysql", mydsn) - if err != nil { - fmt.Printf("Disabling MySQL tests:\n %v", err) - TestMysql = false - } - } else { - fmt.Println("Disabling MySQL tests.") - } - - if TestSqlite { - sldb, err = Connect("sqlite3", sqdsn) - if err != nil { - fmt.Printf("Disabling SQLite:\n %v", err) - TestSqlite = false - } - } else { - fmt.Println("Disabling SQLite tests.") - } -} - -type Schema struct { - create string - drop string -} - -func (s Schema) Postgres() (string, string) { - return s.create, s.drop -} - -func (s Schema) MySQL() (string, string) { - return strings.Replace(s.create, `"`, "`", -1), s.drop -} - -func (s Schema) Sqlite3() (string, string) { - return strings.Replace(s.create, `now()`, `CURRENT_TIMESTAMP`, -1), s.drop -} - -var defaultSchema = Schema{ - create: ` -CREATE TABLE person ( - first_name text, - last_name text, - email text, - added_at timestamp default now() -); - -CREATE TABLE place ( - country text, - city text NULL, - telcode integer -); - -CREATE TABLE capplace ( - "COUNTRY" text, - "CITY" text NULL, - "TELCODE" integer -); - -CREATE TABLE nullperson ( - first_name text NULL, - last_name text NULL, - email text NULL -); - -CREATE TABLE employees ( - name text, - id integer, - boss_id integer -); - -`, - drop: ` -drop table person; -drop table place; -drop table capplace; -drop table nullperson; -drop table employees; -`, -} - -type Person struct { - FirstName string `db:"first_name"` - LastName string `db:"last_name"` - Email string - AddedAt time.Time `db:"added_at"` -} - -type Person2 struct { - FirstName sql.NullString `db:"first_name"` - LastName sql.NullString `db:"last_name"` - Email sql.NullString -} - -type Place struct { - Country string - City sql.NullString - TelCode int -} - -type PlacePtr struct { - Country string - City *string - TelCode int -} - -type PersonPlace struct { - Person - Place -} - -type PersonPlacePtr struct { - *Person - *Place -} - -type EmbedConflict struct { - FirstName string `db:"first_name"` - Person -} - -type SliceMember struct { - Country string - City sql.NullString - TelCode int - People []Person `db:"-"` - Addresses []Place `db:"-"` -} - -// Note that because of field map caching, we need a new type here -// if we've used Place already somewhere in sqlx -type CPlace Place - 
-func MultiExec(e Execer, query string) { - stmts := strings.Split(query, ";\n") - if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 { - stmts = stmts[:len(stmts)-1] - } - for _, s := range stmts { - _, err := e.Exec(s) - if err != nil { - fmt.Println(err, s) - } - } -} - -func RunWithSchema(schema Schema, t *testing.T, test func(db *DB, t *testing.T)) { - runner := func(db *DB, t *testing.T, create, drop string) { - defer func() { - MultiExec(db, drop) - }() - - MultiExec(db, create) - test(db, t) - } - - if TestPostgres { - create, drop := schema.Postgres() - runner(pgdb, t, create, drop) - } - if TestSqlite { - create, drop := schema.Sqlite3() - runner(sldb, t, create, drop) - } - if TestMysql { - create, drop := schema.MySQL() - runner(mysqldb, t, create, drop) - } -} - -func loadDefaultFixture(db *DB, t *testing.T) { - tx := db.MustBegin() - tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net") - tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net") - tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") - tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") - tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") - if db.DriverName() == "mysql" { - tx.MustExec(tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27") - } else { - tx.MustExec(tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27") - } - tx.MustExec(tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444") - tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444") - tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444") - tx.Commit() -} - -// Test a new backwards compatible feature, that missing scan destinations -// will silently scan into sql.RawText rather than failing/panicing -func TestMissingNames(t *testing.T) { - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - type PersonPlus struct { - FirstName string `db:"first_name"` - LastName string `db:"last_name"` - Email string - //AddedAt time.Time `db:"added_at"` - } - - // test Select first - pps := []PersonPlus{} - // pps lacks added_at destination - err := db.Select(&pps, "SELECT * FROM person") - if err == nil { - t.Error("Expected missing name from Select to fail, but it did not.") - } - - // test Get - pp := PersonPlus{} - err = db.Get(&pp, "SELECT * FROM person LIMIT 1") - if err == nil { - t.Error("Expected missing name Get to fail, but it did not.") - } - - // test naked StructScan - pps = []PersonPlus{} - rows, err := db.Query("SELECT * FROM person LIMIT 1") - if err != nil { - t.Fatal(err) - } - rows.Next() - err = StructScan(rows, &pps) - if err == nil { - t.Error("Expected missing name in StructScan to fail, but it did not.") - } - rows.Close() - - // now try various things with unsafe set. 
- db = db.Unsafe() - pps = []PersonPlus{} - err = db.Select(&pps, "SELECT * FROM person") - if err != nil { - t.Error(err) - } - - // test Get - pp = PersonPlus{} - err = db.Get(&pp, "SELECT * FROM person LIMIT 1") - if err != nil { - t.Error(err) - } - - // test naked StructScan - pps = []PersonPlus{} - rowsx, err := db.Queryx("SELECT * FROM person LIMIT 1") - if err != nil { - t.Fatal(err) - } - rowsx.Next() - err = StructScan(rowsx, &pps) - if err != nil { - t.Error(err) - } - rowsx.Close() - - // test Named stmt - if !isUnsafe(db) { - t.Error("Expected db to be unsafe, but it isn't") - } - nstmt, err := db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`) - if err != nil { - t.Fatal(err) - } - // its internal stmt should be marked unsafe - if !nstmt.Stmt.unsafe { - t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety") - } - pps = []PersonPlus{} - err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"}) - if err != nil { - t.Fatal(err) - } - if len(pps) != 1 { - t.Errorf("Expected 1 person back, got %d", len(pps)) - } - - // test it with a safe db - db.unsafe = false - if isUnsafe(db) { - t.Error("expected db to be safe but it isn't") - } - nstmt, err = db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`) - if err != nil { - t.Fatal(err) - } - // it should be safe - if isUnsafe(nstmt) { - t.Error("NamedStmt did not inherit safety") - } - nstmt.Unsafe() - if !isUnsafe(nstmt) { - t.Error("expected newly unsafed NamedStmt to be unsafe") - } - pps = []PersonPlus{} - err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"}) - if err != nil { - t.Fatal(err) - } - if len(pps) != 1 { - t.Errorf("Expected 1 person back, got %d", len(pps)) - } - - }) -} - -func TestEmbeddedStructs(t *testing.T) { - type Loop1 struct{ Person } - type Loop2 struct{ Loop1 } - type Loop3 struct{ Loop2 } - - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - peopleAndPlaces := []PersonPlace{} - err := db.Select( - &peopleAndPlaces, - `SELECT person.*, place.* FROM - person natural join place`) - if err != nil { - t.Fatal(err) - } - for _, pp := range peopleAndPlaces { - if len(pp.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - if len(pp.Place.Country) == 0 { - t.Errorf("Expected non zero lengthed country.") - } - } - - // test embedded structs with StructScan - rows, err := db.Queryx( - `SELECT person.*, place.* FROM - person natural join place`) - if err != nil { - t.Error(err) - } - - perp := PersonPlace{} - rows.Next() - err = rows.StructScan(&perp) - if err != nil { - t.Error(err) - } - - if len(perp.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - if len(perp.Place.Country) == 0 { - t.Errorf("Expected non zero lengthed country.") - } - - rows.Close() - - // test the same for embedded pointer structs - peopleAndPlacesPtrs := []PersonPlacePtr{} - err = db.Select( - &peopleAndPlacesPtrs, - `SELECT person.*, place.* FROM - person natural join place`) - if err != nil { - t.Fatal(err) - } - for _, pp := range peopleAndPlacesPtrs { - if len(pp.Person.FirstName) == 0 { - t.Errorf("Expected non zero lengthed first name.") - } - if len(pp.Place.Country) == 0 { - t.Errorf("Expected non zero lengthed country.") - } - } - - // test "deep nesting" - l3s := []Loop3{} - err = db.Select(&l3s, `select * from person`) - if err != nil { - t.Fatal(err) - } - for _, l3 := range l3s { - if len(l3.Loop2.Loop1.Person.FirstName) == 0 { - 
t.Errorf("Expected non zero lengthed first name.") - } - } - - // test "embed conflicts" - ec := []EmbedConflict{} - err = db.Select(&ec, `select * from person`) - // I'm torn between erroring here or having some kind of working behavior - // in order to allow for more flexibility in destination structs - if err != nil { - t.Errorf("Was not expecting an error on embed conflicts.") - } - }) -} - -func TestJoinQuery(t *testing.T) { - type Employee struct { - Name string - ID int64 - // BossID is an id into the employee table - BossID sql.NullInt64 `db:"boss_id"` - } - type Boss Employee - - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - - var employees []struct { - Employee - Boss `db:"boss"` - } - - err := db.Select( - &employees, - `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees - JOIN employees AS boss ON employees.boss_id = boss.id`) - if err != nil { - t.Fatal(err) - } - - for _, em := range employees { - if len(em.Employee.Name) == 0 { - t.Errorf("Expected non zero lengthed name.") - } - if em.Employee.BossID.Int64 != em.Boss.ID { - t.Errorf("Expected boss ids to match") - } - } - }) -} - -func TestJoinQueryNamedPointerStructs(t *testing.T) { - type Employee struct { - Name string - ID int64 - // BossID is an id into the employee table - BossID sql.NullInt64 `db:"boss_id"` - } - type Boss Employee - - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - - var employees []struct { - Emp1 *Employee `db:"emp1"` - Emp2 *Employee `db:"emp2"` - *Boss `db:"boss"` - } - - err := db.Select( - &employees, - `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id", - emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id", - boss.id "boss.id", boss.name "boss.name" FROM employees AS emp - JOIN employees AS boss ON emp.boss_id = boss.id - `) - if err != nil { - t.Fatal(err) - } - - for _, em := range employees { - if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 { - t.Errorf("Expected non zero lengthed name.") - } - if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID { - t.Errorf("Expected boss ids to match") - } - } - }) -} - -func TestSelectSliceMapTime(t *testing.T) { - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - rows, err := db.Queryx("SELECT * FROM person") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - _, err := rows.SliceScan() - if err != nil { - t.Error(err) - } - } - - rows, err = db.Queryx("SELECT * FROM person") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - m := map[string]interface{}{} - err := rows.MapScan(m) - if err != nil { - t.Error(err) - } - } - - }) -} - -func TestNilReceiver(t *testing.T) { - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - var p *Person - err := db.Get(p, "SELECT * FROM person LIMIT 1") - if err == nil { - t.Error("Expected error when getting into nil struct ptr.") - } - var pp *[]Person - err = db.Select(pp, "SELECT * FROM person") - if err == nil { - t.Error("Expected an error when selecting into nil slice ptr.") - } - }) -} - -func TestNamedQuery(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE place ( - id integer PRIMARY KEY, - name text NULL - ); - CREATE TABLE person ( - first_name text NULL, - last_name text NULL, - email text NULL - ); - CREATE TABLE placeperson ( - first_name text NULL, - last_name text NULL, - email text NULL, - place_id integer NULL - ); 
- CREATE TABLE jsperson ( - "FIRST" text NULL, - last_name text NULL, - "EMAIL" text NULL - );`, - drop: ` - drop table person; - drop table jsperson; - drop table place; - drop table placeperson; - `, - } - - RunWithSchema(schema, t, func(db *DB, t *testing.T) { - type Person struct { - FirstName sql.NullString `db:"first_name"` - LastName sql.NullString `db:"last_name"` - Email sql.NullString - } - - p := Person{ - FirstName: sql.NullString{String: "ben", Valid: true}, - LastName: sql.NullString{String: "doe", Valid: true}, - Email: sql.NullString{String: "ben@doe.com", Valid: true}, - } - - q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)` - _, err := db.NamedExec(q1, p) - if err != nil { - log.Fatal(err) - } - - p2 := &Person{} - rows, err := db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", p) - if err != nil { - log.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(p2) - if err != nil { - t.Error(err) - } - if p2.FirstName.String != "ben" { - t.Error("Expected first name of `ben`, got " + p2.FirstName.String) - } - if p2.LastName.String != "doe" { - t.Error("Expected first name of `doe`, got " + p2.LastName.String) - } - } - - // these are tests for #73; they verify that named queries work if you've - // changed the db mapper. This code checks both NamedQuery "ad-hoc" style - // queries and NamedStmt queries, which use different code paths internally. - old := *db.Mapper - - type JSONPerson struct { - FirstName sql.NullString `json:"FIRST"` - LastName sql.NullString `json:"last_name"` - Email sql.NullString - } - - jp := JSONPerson{ - FirstName: sql.NullString{String: "ben", Valid: true}, - LastName: sql.NullString{String: "smith", Valid: true}, - Email: sql.NullString{String: "ben@smith.com", Valid: true}, - } - - db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper) - - // prepare queries for case sensitivity to test our ToUpper function. - // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line - // strings are `` we use "" by default and swap out for MySQL - pdb := func(s string, db *DB) string { - if db.DriverName() == "mysql" { - return strings.Replace(s, `"`, "`", -1) - } - return s - } - - q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)` - _, err = db.NamedExec(pdb(q1, db), jp) - if err != nil { - t.Fatal(err, db.DriverName()) - } - - // Checks that a person pulled out of the db matches the one we put in - check := func(t *testing.T, rows *Rows) { - jp = JSONPerson{} - for rows.Next() { - err = rows.StructScan(&jp) - if err != nil { - t.Error(err) - } - if jp.FirstName.String != "ben" { - t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName()) - } - if jp.LastName.String != "smith" { - t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName()) - } - if jp.Email.String != "ben@smith.com" { - t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName()) - } - } - } - - ns, err := db.PrepareNamed(pdb(` - SELECT * FROM jsperson - WHERE - "FIRST"=:FIRST AND - last_name=:last_name AND - "EMAIL"=:EMAIL - `, db)) - - if err != nil { - t.Fatal(err) - } - rows, err = ns.Queryx(jp) - if err != nil { - t.Fatal(err) - } - - check(t, rows) - - // Check exactly the same thing, but with db.NamedQuery, which does not go - // through the PrepareNamed/NamedStmt path. 
- rows, err = db.NamedQuery(pdb(` - SELECT * FROM jsperson - WHERE - "FIRST"=:FIRST AND - last_name=:last_name AND - "EMAIL"=:EMAIL - `, db), jp) - if err != nil { - t.Fatal(err) - } - - check(t, rows) - - db.Mapper = &old - - // Test nested structs - type Place struct { - ID int `db:"id"` - Name sql.NullString `db:"name"` - } - type PlacePerson struct { - FirstName sql.NullString `db:"first_name"` - LastName sql.NullString `db:"last_name"` - Email sql.NullString - Place Place `db:"place"` - } - - pl := Place{ - Name: sql.NullString{String: "myplace", Valid: true}, - } - - pp := PlacePerson{ - FirstName: sql.NullString{String: "ben", Valid: true}, - LastName: sql.NullString{String: "doe", Valid: true}, - Email: sql.NullString{String: "ben@doe.com", Valid: true}, - } - - q2 := `INSERT INTO place (id, name) VALUES (1, :name)` - _, err = db.NamedExec(q2, pl) - if err != nil { - log.Fatal(err) - } - - id := 1 - pp.Place.ID = id - - q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)` - _, err = db.NamedExec(q3, pp) - if err != nil { - log.Fatal(err) - } - - pp2 := &PlacePerson{} - rows, err = db.NamedQuery(` - SELECT - first_name, - last_name, - email, - place.id AS "place.id", - place.name AS "place.name" - FROM placeperson - INNER JOIN place ON place.id = placeperson.place_id - WHERE - place.id=:place.id`, pp) - if err != nil { - log.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(pp2) - if err != nil { - t.Error(err) - } - if pp2.FirstName.String != "ben" { - t.Error("Expected first name of `ben`, got " + pp2.FirstName.String) - } - if pp2.LastName.String != "doe" { - t.Error("Expected first name of `doe`, got " + pp2.LastName.String) - } - if pp2.Place.Name.String != "myplace" { - t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String) - } - if pp2.Place.ID != pp.Place.ID { - t.Errorf("Expected place name of %v, got %v", pp.Place.ID, pp2.Place.ID) - } - } - }) -} - -func TestNilInserts(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE tt ( - id integer, - value text NULL DEFAULT NULL - );`, - drop: "drop table tt;", - } - - RunWithSchema(schema, t, func(db *DB, t *testing.T) { - type TT struct { - ID int - Value *string - } - var v, v2 TT - r := db.Rebind - - db.MustExec(r(`INSERT INTO tt (id) VALUES (1)`)) - db.Get(&v, r(`SELECT * FROM tt`)) - if v.ID != 1 { - t.Errorf("Expecting id of 1, got %v", v.ID) - } - if v.Value != nil { - t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) - } - - v.ID = 2 - // NOTE: this incidentally uncovered a bug which was that named queries with - // pointer destinations would not work if the passed value here was not addressable, - // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for - // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly - // function. This next line is important as it provides the only coverage for this. 
- db.NamedExec(`INSERT INTO tt (id, value) VALUES (:id, :value)`, v) - - db.Get(&v2, r(`SELECT * FROM tt WHERE id=2`)) - if v.ID != v2.ID { - t.Errorf("%v != %v", v.ID, v2.ID) - } - if v2.Value != nil { - t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) - } - }) -} - -func TestScanError(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE kv ( - k text, - v integer - );`, - drop: `drop table kv;`, - } - - RunWithSchema(schema, t, func(db *DB, t *testing.T) { - type WrongTypes struct { - K int - V string - } - _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1) - if err != nil { - t.Error(err) - } - - rows, err := db.Queryx("SELECT * FROM kv") - if err != nil { - t.Error(err) - } - for rows.Next() { - var wt WrongTypes - err := rows.StructScan(&wt) - if err == nil { - t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName()) - } - } - }) -} - -// FIXME: this function is kinda big but it slows things down to be constantly -// loading and reloading the schema.. - -func TestUsage(t *testing.T) { - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - slicemembers := []SliceMember{} - err := db.Select(&slicemembers, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - t.Fatal(err) - } - - people := []Person{} - - err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") - if err != nil { - t.Fatal(err) - } - - jason, john := people[0], people[1] - if jason.FirstName != "Jason" { - t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName) - } - if jason.LastName != "Moiron" { - t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName) - } - if jason.Email != "jmoiron@jmoiron.net" { - t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email) - } - if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" { - t.Errorf("John Doe's person record not what expected: Got %v\n", john) - } - - jason = Person{} - err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason") - - if err != nil { - t.Fatal(err) - } - if jason.FirstName != "Jason" { - t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName) - } - - err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar") - if err == nil { - t.Errorf("Expecting an error, got nil\n") - } - if err != sql.ErrNoRows { - t.Errorf("Expected sql.ErrNoRows, got %v\n", err) - } - - // The following tests check statement reuse, which was actually a problem - // due to copying being done when creating Stmt's which was eventually removed - stmt1, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Fatal(err) - } - jason = Person{} - - row := stmt1.QueryRowx("DoesNotExist") - row.Scan(&jason) - row = stmt1.QueryRowx("DoesNotExist") - row.Scan(&jason) - - err = stmt1.Get(&jason, "DoesNotExist User") - if err == nil { - t.Error("Expected an error") - } - err = stmt1.Get(&jason, "DoesNotExist User 2") - if err == nil { - t.Fatal(err) - } - - stmt2, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Fatal(err) - } - jason = Person{} - tx, err := db.Beginx() - if err != nil { - t.Fatal(err) - } - tstmt2 := tx.Stmtx(stmt2) - row2 := tstmt2.QueryRowx("Jason") - err = row2.StructScan(&jason) - if err != nil { - t.Error(err) - } - tx.Commit() - - places := []*Place{} - err = db.Select(&places, "SELECT telcode FROM place ORDER BY 
telcode ASC") - if err != nil { - t.Fatal(err) - } - - usa, singsing, honkers := places[0], places[1], places[2] - - if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { - t.Errorf("Expected integer telcodes to work, got %#v", places) - } - - placesptr := []PlacePtr{} - err = db.Select(&placesptr, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - t.Error(err) - } - //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2]) - - // if you have null fields and use SELECT *, you must use sql.Null* in your struct - // this test also verifies that you can use either a []Struct{} or a []*Struct{} - places2 := []Place{} - err = db.Select(&places2, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - t.Fatal(err) - } - - usa, singsing, honkers = &places2[0], &places2[1], &places2[2] - - // this should return a type error that &p is not a pointer to a struct slice - p := Place{} - err = db.Select(&p, "SELECT * FROM place ORDER BY telcode ASC") - if err == nil { - t.Errorf("Expected an error, argument to select should be a pointer to a struct slice") - } - - // this should be an error - pl := []Place{} - err = db.Select(pl, "SELECT * FROM place ORDER BY telcode ASC") - if err == nil { - t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.") - } - - if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { - t.Errorf("Expected integer telcodes to work, got %#v", places) - } - - stmt, err := db.Preparex(db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC")) - if err != nil { - t.Error(err) - } - - places = []*Place{} - err = stmt.Select(&places, 10) - if len(places) != 2 { - t.Error("Expected 2 places, got 0.") - } - if err != nil { - t.Fatal(err) - } - singsing, honkers = places[0], places[1] - if singsing.TelCode != 65 || honkers.TelCode != 852 { - t.Errorf("Expected the right telcodes, got %#v", places) - } - - rows, err := db.Queryx("SELECT * FROM place") - if err != nil { - t.Fatal(err) - } - place := Place{} - for rows.Next() { - err = rows.StructScan(&place) - if err != nil { - t.Fatal(err) - } - } - - rows, err = db.Queryx("SELECT * FROM place") - if err != nil { - t.Fatal(err) - } - m := map[string]interface{}{} - for rows.Next() { - err = rows.MapScan(m) - if err != nil { - t.Fatal(err) - } - _, ok := m["country"] - if !ok { - t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m) - } - } - - rows, err = db.Queryx("SELECT * FROM place") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - s, err := rows.SliceScan() - if err != nil { - t.Error(err) - } - if len(s) != 3 { - t.Errorf("Expected 3 columns in result, got %d\n", len(s)) - } - } - - // test advanced querying - // test that NamedExec works with a map as well as a struct - _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{ - "first": "Bin", - "last": "Smuth", - "email": "bensmith@allblacks.nz", - }) - if err != nil { - t.Fatal(err) - } - - // ensure that if the named param happens right at the end it still works - // ensure that NamedQuery works with a map[string]interface{} - rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"}) - if err != nil { - t.Fatal(err) - } - - ben := &Person{} - for rows.Next() { - err = rows.StructScan(ben) - if err != nil { - t.Fatal(err) - } - if ben.FirstName != "Bin" { - t.Fatal("Expected 
first name of `Bin`, got " + ben.FirstName) - } - if ben.LastName != "Smuth" { - t.Fatal("Expected first name of `Smuth`, got " + ben.LastName) - } - } - - ben.FirstName = "Ben" - ben.LastName = "Smith" - ben.Email = "binsmuth@allblacks.nz" - - // Insert via a named query using the struct - _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben) - - if err != nil { - t.Fatal(err) - } - - rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", ben) - if err != nil { - t.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(ben) - if err != nil { - t.Fatal(err) - } - if ben.FirstName != "Ben" { - t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) - } - if ben.LastName != "Smith" { - t.Fatal("Expected first name of `Smith`, got " + ben.LastName) - } - } - // ensure that Get does not panic on emppty result set - person := &Person{} - err = db.Get(person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist") - if err == nil { - t.Fatal("Should have got an error for Get on non-existant row.") - } - - // lets test prepared statements some more - - stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Fatal(err) - } - rows, err = stmt.Queryx("Ben") - if err != nil { - t.Fatal(err) - } - for rows.Next() { - err = rows.StructScan(ben) - if err != nil { - t.Fatal(err) - } - if ben.FirstName != "Ben" { - t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) - } - if ben.LastName != "Smith" { - t.Fatal("Expected first name of `Smith`, got " + ben.LastName) - } - } - - john = Person{} - stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) - if err != nil { - t.Error(err) - } - err = stmt.Get(&john, "John") - if err != nil { - t.Error(err) - } - - // test name mapping - // THIS USED TO WORK BUT WILL NO LONGER WORK. - db.MapperFunc(strings.ToUpper) - rsa := CPlace{} - err = db.Get(&rsa, "SELECT * FROM capplace;") - if err != nil { - t.Error(err, "in db:", db.DriverName()) - } - db.MapperFunc(strings.ToLower) - - // create a copy and change the mapper, then verify the copy behaves - // differently from the original. 
- dbCopy := NewDb(db.DB, db.DriverName()) - dbCopy.MapperFunc(strings.ToUpper) - err = dbCopy.Get(&rsa, "SELECT * FROM capplace;") - if err != nil { - fmt.Println(db.DriverName()) - t.Error(err) - } - - err = db.Get(&rsa, "SELECT * FROM cappplace;") - if err == nil { - t.Error("Expected no error, got ", err) - } - - // test base type slices - var sdest []string - rows, err = db.Queryx("SELECT email FROM person ORDER BY email ASC;") - if err != nil { - t.Error(err) - } - err = scanAll(rows, &sdest, false) - if err != nil { - t.Error(err) - } - - // test Get with base types - var count int - err = db.Get(&count, "SELECT count(*) FROM person;") - if err != nil { - t.Error(err) - } - if count != len(sdest) { - t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest)) - } - - // test Get and Select with time.Time, #84 - var addedAt time.Time - err = db.Get(&addedAt, "SELECT added_at FROM person LIMIT 1;") - if err != nil { - t.Error(err) - } - - var addedAts []time.Time - err = db.Select(&addedAts, "SELECT added_at FROM person;") - if err != nil { - t.Error(err) - } - - // test it on a double pointer - var pcount *int - err = db.Get(&pcount, "SELECT count(*) FROM person;") - if err != nil { - t.Error(err) - } - if *pcount != count { - t.Errorf("expected %d = %d", *pcount, count) - } - - // test Select... - sdest = []string{} - err = db.Select(&sdest, "SELECT first_name FROM person ORDER BY first_name ASC;") - if err != nil { - t.Error(err) - } - expected := []string{"Ben", "Bin", "Jason", "John"} - for i, got := range sdest { - if got != expected[i] { - t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got) - } - } - - var nsdest []sql.NullString - err = db.Select(&nsdest, "SELECT city FROM place ORDER BY city ASC") - if err != nil { - t.Error(err) - } - for _, val := range nsdest { - if val.Valid && val.String != "New York" { - t.Errorf("expected single valid result to be `New York`, but got %s", val.String) - } - } - }) -} - -type Product struct { - ProductID int -} - -// tests that sqlx will not panic when the wrong driver is passed because -// of an automatic nil dereference in sqlx.Open(), which was fixed. -func TestDoNotPanicOnConnect(t *testing.T) { - db, err := Connect("bogus", "hehe") - if err == nil { - t.Errorf("Should return error when using bogus driverName") - } - if db != nil { - t.Errorf("Should not return the db on a connect failure") - } -} - -func TestRebind(t *testing.T) { - q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` - q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` - - s1 := Rebind(DOLLAR, q1) - s2 := Rebind(DOLLAR, q2) - - if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` { - t.Errorf("q1 failed") - } - - if s2 != `INSERT INTO foo (a, b, c) VALUES ($1, $2, "foo"), ("Hi", $3, $4)` { - t.Errorf("q2 failed") - } - - s1 = Rebind(NAMED, q1) - s2 = Rebind(NAMED, q2) - - ex1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ` + - `(:arg1, :arg2, :arg3, :arg4, :arg5, :arg6, :arg7, :arg8, :arg9, :arg10)` - if s1 != ex1 { - t.Error("q1 failed on Named params") - } - - ex2 := `INSERT INTO foo (a, b, c) VALUES (:arg1, :arg2, "foo"), ("Hi", :arg3, :arg4)` - if s2 != ex2 { - t.Error("q2 failed on Named params") - } -} - -func TestBindMap(t *testing.T) { - // Test that it works.. 
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` - am := map[string]interface{}{ - "name": "Jason Moiron", - "age": 30, - "first": "Jason", - "last": "Moiron", - } - - bq, args, _ := bindMap(QUESTION, q1, am) - expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)` - if bq != expect { - t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) - } - - if args[0].(string) != "Jason Moiron" { - t.Errorf("Expected `Jason Moiron`, got %v\n", args[0]) - } - - if args[1].(int) != 30 { - t.Errorf("Expected 30, got %v\n", args[1]) - } - - if args[2].(string) != "Jason" { - t.Errorf("Expected Jason, got %v\n", args[2]) - } - - if args[3].(string) != "Moiron" { - t.Errorf("Expected Moiron, got %v\n", args[3]) - } -} - -// Test for #117, embedded nil maps - -type Message struct { - Text string `db:"string"` - Properties PropertyMap `db:"properties"` // Stored as JSON in the database -} - -type PropertyMap map[string]string - -// Implement driver.Valuer and sql.Scanner interfaces on PropertyMap -func (p PropertyMap) Value() (driver.Value, error) { - if len(p) == 0 { - return nil, nil - } - return json.Marshal(p) -} - -func (p PropertyMap) Scan(src interface{}) error { - v := reflect.ValueOf(src) - if !v.IsValid() || v.IsNil() { - return nil - } - if data, ok := src.([]byte); ok { - return json.Unmarshal(data, &p) - } - return fmt.Errorf("Could not not decode type %T -> %T", src, p) -} - -func TestEmbeddedMaps(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE message ( - string text, - properties text - );`, - drop: `drop table message;`, - } - - RunWithSchema(schema, t, func(db *DB, t *testing.T) { - messages := []Message{ - {"Hello, World", PropertyMap{"one": "1", "two": "2"}}, - {"Thanks, Joy", PropertyMap{"pull": "request"}}, - } - q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);` - for _, m := range messages { - _, err := db.NamedExec(q1, m) - if err != nil { - t.Fatal(err) - } - } - var count int - err := db.Get(&count, "SELECT count(*) FROM message") - if err != nil { - t.Fatal(err) - } - if count != len(messages) { - t.Fatalf("Expected %d messages in DB, found %d", len(messages), count) - } - - var m Message - err = db.Get(&m, "SELECT * FROM message LIMIT 1;") - if err != nil { - t.Fatal(err) - } - if m.Properties == nil { - t.Fatal("Expected m.Properties to not be nil, but it was.") - } - }) -} - -func TestIssue197(t *testing.T) { - // this test actually tests for a bug in database/sql: - // https://github.com/golang/go/issues/13905 - // this potentially makes _any_ named type that is an alias for []byte - // unsafe to use in a lot of different ways (basically, unsafe to hold - // onto after loading from the database). 
- t.Skip() - - type mybyte []byte - type Var struct{ Raw json.RawMessage } - type Var2 struct{ Raw []byte } - type Var3 struct{ Raw mybyte } - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - var err error - var v, q Var - if err = db.Get(&v, `SELECT '{"a": "b"}' AS raw`); err != nil { - t.Fatal(err) - } - if err = db.Get(&q, `SELECT 'null' AS raw`); err != nil { - t.Fatal(err) - } - - var v2, q2 Var2 - if err = db.Get(&v2, `SELECT '{"a": "b"}' AS raw`); err != nil { - t.Fatal(err) - } - if err = db.Get(&q2, `SELECT 'null' AS raw`); err != nil { - t.Fatal(err) - } - - var v3, q3 Var3 - if err = db.QueryRow(`SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil { - t.Fatal(err) - } - if err = db.QueryRow(`SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil { - t.Fatal(err) - } - t.Fail() - }) -} - -func TestIn(t *testing.T) { - // some quite normal situations - type tr struct { - q string - args []interface{} - c int - } - tests := []tr{ - {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?", - []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}, - 7}, - {"SELECT * FROM foo WHERE x in (?)", - []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}}, - 8}, - } - for _, test := range tests { - q, a, err := In(test.q, test.args...) - if err != nil { - t.Error(err) - } - if len(a) != test.c { - t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a) - } - if strings.Count(q, "?") != test.c { - t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?")) - } - } - - // too many bindVars, but no slices, so short circuits parsing - // i'm not sure if this is the right behavior; this query/arg combo - // might not work, but we shouldn't parse if we don't need to - { - orig := "SELECT * FROM foo WHERE x = ? AND y = ?" - q, a, err := In(orig, "foo", "bar", "baz") - if err != nil { - t.Error(err) - } - if len(a) != 3 { - t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a) - } - if q != orig { - t.Error("Expected unchanged query.") - } - } - - tests = []tr{ - // too many bindvars; slice present so should return error during parse - {"SELECT * FROM foo WHERE x = ? and y = ?", - []interface{}{"foo", []int{1, 2, 3}, "bar"}, - 0}, - // empty slice, should return error before parse - {"SELECT * FROM foo WHERE x = ?", - []interface{}{[]int{}}, - 0}, - // too *few* bindvars, should return an error - {"SELECT * FROM foo WHERE x = ? AND y in (?)", - []interface{}{[]int{1, 2, 3}}, - 0}, - } - for _, test := range tests { - _, _, err := In(test.q, test.args...) - if err == nil { - t.Error("Expected an error, but got nil.") - } - } - RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { - loadDefaultFixture(db, t) - //tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") - //tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") - //tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") - telcodes := []int{852, 65} - q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode" - query, args, err := In(q, telcodes) - if err != nil { - t.Error(err) - } - query = db.Rebind(query) - places := []Place{} - err = db.Select(&places, query, args...) 
- if err != nil { - t.Error(err) - } - if len(places) != 2 { - t.Fatalf("Expecting 2 results, got %d", len(places)) - } - if places[0].TelCode != 65 { - t.Errorf("Expecting singapore first, but got %#v", places[0]) - } - if places[1].TelCode != 852 { - t.Errorf("Expecting hong kong second, but got %#v", places[1]) - } - }) -} - -func TestBindStruct(t *testing.T) { - var err error - - q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` - - type tt struct { - Name string - Age int - First string - Last string - } - - type tt2 struct { - Field1 string `db:"field_1"` - Field2 string `db:"field_2"` - } - - type tt3 struct { - tt2 - Name string - } - - am := tt{"Jason Moiron", 30, "Jason", "Moiron"} - - bq, args, _ := bindStruct(QUESTION, q1, am, mapper()) - expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)` - if bq != expect { - t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) - } - - if args[0].(string) != "Jason Moiron" { - t.Errorf("Expected `Jason Moiron`, got %v\n", args[0]) - } - - if args[1].(int) != 30 { - t.Errorf("Expected 30, got %v\n", args[1]) - } - - if args[2].(string) != "Jason" { - t.Errorf("Expected Jason, got %v\n", args[2]) - } - - if args[3].(string) != "Moiron" { - t.Errorf("Expected Moiron, got %v\n", args[3]) - } - - am2 := tt2{"Hello", "World"} - bq, args, _ = bindStruct(QUESTION, "INSERT INTO foo (a, b) VALUES (:field_2, :field_1)", am2, mapper()) - expect = `INSERT INTO foo (a, b) VALUES (?, ?)` - if bq != expect { - t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) - } - - if args[0].(string) != "World" { - t.Errorf("Expected 'World', got %s\n", args[0].(string)) - } - if args[1].(string) != "Hello" { - t.Errorf("Expected 'Hello', got %s\n", args[1].(string)) - } - - am3 := tt3{Name: "Hello!"} - am3.Field1 = "Hello" - am3.Field2 = "World" - - bq, args, err = bindStruct(QUESTION, "INSERT INTO foo (a, b, c) VALUES (:name, :field_1, :field_2)", am3, mapper()) - - if err != nil { - t.Fatal(err) - } - - expect = `INSERT INTO foo (a, b, c) VALUES (?, ?, ?)` - if bq != expect { - t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) - } - - if args[0].(string) != "Hello!" 
{ - t.Errorf("Expected 'Hello!', got %s\n", args[0].(string)) - } - if args[1].(string) != "Hello" { - t.Errorf("Expected 'Hello', got %s\n", args[1].(string)) - } - if args[2].(string) != "World" { - t.Errorf("Expected 'World', got %s\n", args[0].(string)) - } -} - -func TestEmbeddedLiterals(t *testing.T) { - var schema = Schema{ - create: ` - CREATE TABLE x ( - k text - );`, - drop: `drop table x;`, - } - - RunWithSchema(schema, t, func(db *DB, t *testing.T) { - type t1 struct { - K *string - } - type t2 struct { - Inline struct { - F string - } - K *string - } - - db.MustExec(db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three") - - target := t1{} - err := db.Get(&target, db.Rebind("SELECT * FROM x WHERE k=?"), "one") - if err != nil { - t.Error(err) - } - if *target.K != "one" { - t.Error("Expected target.K to be `one`, got ", target.K) - } - - target2 := t2{} - err = db.Get(&target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one") - if err != nil { - t.Error(err) - } - if *target2.K != "one" { - t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K) - } - }) -} - -func BenchmarkBindStruct(b *testing.B) { - b.StopTimer() - q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` - type t struct { - Name string - Age int - First string - Last string - } - am := t{"Jason Moiron", 30, "Jason", "Moiron"} - b.StartTimer() - for i := 0; i < b.N; i++ { - bindStruct(DOLLAR, q1, am, mapper()) - } -} - -func BenchmarkBindMap(b *testing.B) { - b.StopTimer() - q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` - am := map[string]interface{}{ - "name": "Jason Moiron", - "age": 30, - "first": "Jason", - "last": "Moiron", - } - b.StartTimer() - for i := 0; i < b.N; i++ { - bindMap(DOLLAR, q1, am) - } -} - -func BenchmarkIn(b *testing.B) { - q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` - - for i := 0; i < b.N; i++ { - _, _, _ = In(q, []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}...) - } -} - -func BenchmarkIn1k(b *testing.B) { - q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` - - var vals [1000]interface{} - - for i := 0; i < b.N; i++ { - _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) - } -} - -func BenchmarkIn1kInt(b *testing.B) { - q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` - - var vals [1000]int - - for i := 0; i < b.N; i++ { - _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) - } -} - -func BenchmarkIn1kString(b *testing.B) { - q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` - - var vals [1000]string - - for i := 0; i < b.N; i++ { - _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) 
- } -} - -func BenchmarkRebind(b *testing.B) { - b.StopTimer() - q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` - q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` - b.StartTimer() - - for i := 0; i < b.N; i++ { - Rebind(DOLLAR, q1) - Rebind(DOLLAR, q2) - } -} - -func BenchmarkRebindBuffer(b *testing.B) { - b.StopTimer() - q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` - q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` - b.StartTimer() - - for i := 0; i < b.N; i++ { - rebindBuff(DOLLAR, q1) - rebindBuff(DOLLAR, q2) - } -} diff --git a/vendor/github.com/jmoiron/sqlx/types/README.md b/vendor/github.com/jmoiron/sqlx/types/README.md deleted file mode 100644 index 713abe5..0000000 --- a/vendor/github.com/jmoiron/sqlx/types/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# types - -The types package provides some useful types which implement the `sql.Scanner` -and `driver.Valuer` interfaces, suitable for use as scan and value targets with -database/sql. diff --git a/vendor/github.com/jmoiron/sqlx/types/types.go b/vendor/github.com/jmoiron/sqlx/types/types.go deleted file mode 100644 index 7b014c1..0000000 --- a/vendor/github.com/jmoiron/sqlx/types/types.go +++ /dev/null @@ -1,172 +0,0 @@ -package types - -import ( - "bytes" - "compress/gzip" - "database/sql/driver" - "encoding/json" - "errors" - - "io/ioutil" -) - -// GzippedText is a []byte which transparently gzips data being submitted to -// a database and ungzips data being Scanned from a database. -type GzippedText []byte - -// Value implements the driver.Valuer interface, gzipping the raw value of -// this GzippedText. -func (g GzippedText) Value() (driver.Value, error) { - b := make([]byte, 0, len(g)) - buf := bytes.NewBuffer(b) - w := gzip.NewWriter(buf) - w.Write(g) - w.Close() - return buf.Bytes(), nil - -} - -// Scan implements the sql.Scanner interface, ungzipping the value coming off -// the wire and storing the raw result in the GzippedText. -func (g *GzippedText) Scan(src interface{}) error { - var source []byte - switch src.(type) { - case string: - source = []byte(src.(string)) - case []byte: - source = src.([]byte) - default: - return errors.New("Incompatible type for GzippedText") - } - reader, err := gzip.NewReader(bytes.NewReader(source)) - if err != nil { - return err - } - defer reader.Close() - b, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - *g = GzippedText(b) - return nil -} - -// JSONText is a json.RawMessage, which is a []byte underneath. -// Value() validates the json format in the source, and returns an error if -// the json is not valid. Scan does no validation. JSONText additionally -// implements `Unmarshal`, which unmarshals the json within to an interface{} -type JSONText json.RawMessage - -var emptyJSON = JSONText("{}") - -// MarshalJSON returns the *j as the JSON encoding of j. -func (j JSONText) MarshalJSON() ([]byte, error) { - if len(j) == 0 { - return emptyJSON, nil - } - return j, nil -} - -// UnmarshalJSON sets *j to a copy of data -func (j *JSONText) UnmarshalJSON(data []byte) error { - if j == nil { - return errors.New("JSONText: UnmarshalJSON on nil pointer") - } - *j = append((*j)[0:0], data...) - return nil -} - -// Value returns j as a value. This does a validating unmarshal into another -// RawMessage. If j is invalid json, it returns an error. 
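// A minimal usage sketch, not part of the package: from a caller's point of
// view, JSONText is declared on a struct field mapped to a JSON/TEXT column
// and database/sql invokes Scan and Value automatically. The struct, query,
// and column names below are made up for illustration, and an sqlx db handle
// plus a log import are assumed:
//
//	var r struct {
//		Meta types.JSONText `db:"meta"`
//	}
//	if err := db.Get(&r, "SELECT meta FROM rows LIMIT 1"); err != nil {
//		log.Fatal(err)
//	}
//	m := map[string]interface{}{}
//	if err := r.Meta.Unmarshal(&m); err != nil {
//		log.Fatal(err)
//	}
//
// Scan stores the bytes without validation; Value, defined directly below,
// rejects invalid JSON.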
-func (j JSONText) Value() (driver.Value, error) { - var m json.RawMessage - var err = j.Unmarshal(&m) - if err != nil { - return []byte{}, err - } - return []byte(j), nil -} - -// Scan stores the src in *j. No validation is done. -func (j *JSONText) Scan(src interface{}) error { - var source []byte - switch t := src.(type) { - case string: - source = []byte(t) - case []byte: - if len(t) == 0 { - source = emptyJSON - } else { - source = t - } - case nil: - *j = emptyJSON - default: - return errors.New("Incompatible type for JSONText") - } - *j = JSONText(append((*j)[0:0], source...)) - return nil -} - -// Unmarshal unmarshal's the json in j to v, as in json.Unmarshal. -func (j *JSONText) Unmarshal(v interface{}) error { - if len(*j) == 0 { - *j = emptyJSON - } - return json.Unmarshal([]byte(*j), v) -} - -// String supports pretty printing for JSONText types. -func (j JSONText) String() string { - return string(j) -} - -// NullJSONText represents a JSONText that may be null. -// NullJSONText implements the scanner interface so -// it can be used as a scan destination, similar to NullString. -type NullJSONText struct { - JSONText - Valid bool // Valid is true if JSONText is not NULL -} - -// Scan implements the Scanner interface. -func (n *NullJSONText) Scan(value interface{}) error { - if value == nil { - n.JSONText, n.Valid = emptyJSON, false - return nil - } - n.Valid = true - return n.JSONText.Scan(value) -} - -// Value implements the driver Valuer interface. -func (n NullJSONText) Value() (driver.Value, error) { - if !n.Valid { - return nil, nil - } - return n.JSONText.Value() -} - -// BitBool is an implementation of a bool for the MySQL type BIT(1). -// This type allows you to avoid wasting an entire byte for MySQL's boolean type TINYINT. -type BitBool bool - -// Value implements the driver.Valuer interface, -// and turns the BitBool into a bitfield (BIT(1)) for MySQL storage. -func (b BitBool) Value() (driver.Value, error) { - if b { - return []byte{1}, nil - } - return []byte{0}, nil -} - -// Scan implements the sql.Scanner interface, -// and turns the bitfield incoming from MySQL into a BitBool -func (b *BitBool) Scan(src interface{}) error { - v, ok := src.([]byte) - if !ok { - return errors.New("bad []byte type assertion") - } - *b = v[0] == 1 - return nil -} diff --git a/vendor/github.com/jmoiron/sqlx/types/types_test.go b/vendor/github.com/jmoiron/sqlx/types/types_test.go deleted file mode 100644 index 29813d1..0000000 --- a/vendor/github.com/jmoiron/sqlx/types/types_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package types - -import "testing" - -func TestGzipText(t *testing.T) { - g := GzippedText("Hello, world") - v, err := g.Value() - if err != nil { - t.Errorf("Was not expecting an error") - } - err = (&g).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } - if string(g) != "Hello, world" { - t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g)) - } -} - -func TestJSONText(t *testing.T) { - j := JSONText(`{"foo": 1, "bar": 2}`) - v, err := j.Value() - if err != nil { - t.Errorf("Was not expecting an error") - } - err = (&j).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } - m := map[string]interface{}{} - j.Unmarshal(&m) - - if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 { - t.Errorf("Expected valid json but got some garbage instead? 
%#v", m) - } - - j = JSONText(`{"foo": 1, invalid, false}`) - v, err = j.Value() - if err == nil { - t.Errorf("Was expecting invalid json to fail!") - } - - j = JSONText("") - v, err = j.Value() - if err != nil { - t.Errorf("Was not expecting an error") - } - - err = (&j).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } - - j = JSONText(nil) - v, err = j.Value() - if err != nil { - t.Errorf("Was not expecting an error") - } - - err = (&j).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } -} - -func TestNullJSONText(t *testing.T) { - j := NullJSONText{} - err := j.Scan(`{"foo": 1, "bar": 2}`) - if err != nil { - t.Errorf("Was not expecting an error") - } - v, err := j.Value() - if err != nil { - t.Errorf("Was not expecting an error") - } - err = (&j).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } - m := map[string]interface{}{} - j.Unmarshal(&m) - - if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 { - t.Errorf("Expected valid json but got some garbage instead? %#v", m) - } - - j = NullJSONText{} - err = j.Scan(nil) - if err != nil { - t.Errorf("Was not expecting an error") - } - if j.Valid != false { - t.Errorf("Expected valid to be false, but got true") - } -} - -func TestBitBool(t *testing.T) { - // Test true value - var b BitBool = true - - v, err := b.Value() - if err != nil { - t.Errorf("Cannot return error") - } - err = (&b).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } - if !b { - t.Errorf("Was expecting the bool we sent in (true), got %v", b) - } - - // Test false value - b = false - - v, err = b.Value() - if err != nil { - t.Errorf("Cannot return error") - } - err = (&b).Scan(v) - if err != nil { - t.Errorf("Was not expecting an error") - } - if b { - t.Errorf("Was expecting the bool we sent in (false), got %v", b) - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/.gitignore b/vendor/github.com/mattn/go-sqlite3/.gitignore deleted file mode 100644 index 8a0e48d..0000000 --- a/vendor/github.com/mattn/go-sqlite3/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.db -*.exe -*.dll -*.o diff --git a/vendor/github.com/mattn/go-sqlite3/.travis.yml b/vendor/github.com/mattn/go-sqlite3/.travis.yml deleted file mode 100644 index 46e70cb..0000000 --- a/vendor/github.com/mattn/go-sqlite3/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: required -dist: trusty -env: - - GOTAGS= - - GOTAGS=libsqlite3 - - GOTAGS=trace - - GOTAGS=vtable -go: - - 1.7.x - - 1.8.x - - 1.9.x - - master -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx - - go test -race -v . 
-tags "$GOTAGS" diff --git a/vendor/github.com/mattn/go-sqlite3/LICENSE b/vendor/github.com/mattn/go-sqlite3/LICENSE deleted file mode 100644 index ca458bb..0000000 --- a/vendor/github.com/mattn/go-sqlite3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md deleted file mode 100644 index ad00f10..0000000 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ /dev/null @@ -1,97 +0,0 @@ -go-sqlite3 -========== - -[![GoDoc Reference](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3) -[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3) -[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3) - -Description ------------ - -sqlite3 driver conforming to the built-in database/sql interface - -Installation ------------- - -This package can be installed with the go get command: - - go get github.com/mattn/go-sqlite3 - -_go-sqlite3_ is *cgo* package. -If you want to build your app using go-sqlite3, you need gcc. -However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore. - -Documentation -------------- - -API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3 - -Examples can be found under the `./_example` directory - -FAQ ---- - -* Want to build go-sqlite3 with libsqlite3 on my linux. - - Use `go build --tags "libsqlite3 linux"` - -* Want to build go-sqlite3 with libsqlite3 on OS X. - - Install sqlite3 from homebrew: `brew install sqlite3` - - Use `go build --tags "libsqlite3 darwin"` - -* Want to build go-sqlite3 with icu extension. - - Use `go build --tags "icu"` - - Available extensions: `json1`, `fts5`, `icu` - -* Can't build go-sqlite3 on windows 64bit. - - > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit. - > See: [#27](https://github.com/mattn/go-sqlite3/issues/27) - -* Getting insert error while query is opened. - - > You can pass some arguments into the connection string, for example, a URI. 
- > See: [#39](https://github.com/mattn/go-sqlite3/issues/39) - -* Do you want to cross compile? mingw on Linux or Mac? - - > See: [#106](https://github.com/mattn/go-sqlite3/issues/106) - > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html - -* Want to get time.Time with current locale - - Use `_loc=auto` in SQLite3 filename schema like `file:foo.db?_loc=auto`. - -* Can I use this in multiple routines concurrently? - - Yes for readonly. But, No for writable. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209). - -* Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database? - - Each connection to :memory: opens a brand new in-memory sql database, so if - the stdlib's sql engine happens to open another connection and you've only - specified ":memory:", that connection will see a brand new database. A - workaround is to use "file::memory:?mode=memory&cache=shared". Every - connection to this string will point to the same in-memory database. See - [#204](https://github.com/mattn/go-sqlite3/issues/204) for more info. - -License -------- - -MIT: http://mattn.mit-license.org/2012 - -sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h - -The -binding suffix was added to avoid build failures under gccgo. - -In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3. - -Author ------- - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go b/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go deleted file mode 100644 index 85657e6..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go +++ /dev/null @@ -1,133 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "math" - "math/rand" - - sqlite "github.com/mattn/go-sqlite3" -) - -// Computes x^y -func pow(x, y int64) int64 { - return int64(math.Pow(float64(x), float64(y))) -} - -// Computes the bitwise exclusive-or of all its arguments -func xor(xs ...int64) int64 { - var ret int64 - for _, x := range xs { - ret ^= x - } - return ret -} - -// Returns a random number. It's actually deterministic here because -// we don't seed the RNG, but it's an example of a non-pure function -// from SQLite's POV. 
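// For reference (not from the original example): "non-pure" matters because
// the last argument to conn.RegisterFunc below is a pure flag. pow and xor are
// registered with true, marking them deterministic so SQLite may reuse their
// results for identical arguments, while rand must be registered with false so
// it is re-evaluated on every call.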
-func getrand() int64 { - return rand.Int63() -} - -// Computes the standard deviation of a GROUPed BY set of values -type stddev struct { - xs []int64 - // Running average calculation - sum int64 - n int64 -} - -func newStddev() *stddev { return &stddev{} } - -func (s *stddev) Step(x int64) { - s.xs = append(s.xs, x) - s.sum += x - s.n++ -} - -func (s *stddev) Done() float64 { - mean := float64(s.sum) / float64(s.n) - var sqDiff []float64 - for _, x := range s.xs { - sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2)) - } - var dev float64 - for _, x := range sqDiff { - dev += x - } - dev /= float64(len(sqDiff)) - return math.Sqrt(dev) -} - -func main() { - sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{ - ConnectHook: func(conn *sqlite.SQLiteConn) error { - if err := conn.RegisterFunc("pow", pow, true); err != nil { - return err - } - if err := conn.RegisterFunc("xor", xor, true); err != nil { - return err - } - if err := conn.RegisterFunc("rand", getrand, false); err != nil { - return err - } - if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil { - return err - } - return nil - }, - }) - - db, err := sql.Open("sqlite3_custom", ":memory:") - if err != nil { - log.Fatal("Failed to open database:", err) - } - defer db.Close() - - var i int64 - err = db.QueryRow("SELECT pow(2,3)").Scan(&i) - if err != nil { - log.Fatal("POW query error:", err) - } - fmt.Println("pow(2,3) =", i) // 8 - - err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i) - if err != nil { - log.Fatal("XOR query error:", err) - } - fmt.Println("xor(1,2,3,4,5) =", i) // 7 - - err = db.QueryRow("SELECT rand()").Scan(&i) - if err != nil { - log.Fatal("RAND query error:", err) - } - fmt.Println("rand() =", i) // pseudorandom - - _, err = db.Exec("create table foo (department integer, profits integer)") - if err != nil { - log.Fatal("Failed to create table:", err) - } - _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)") - if err != nil { - log.Fatal("Failed to insert records:", err) - } - - rows, err := db.Query("select department, stddev(profits) from foo group by department") - if err != nil { - log.Fatal("STDDEV query error:", err) - } - defer rows.Close() - for rows.Next() { - var dept int64 - var dev float64 - if err := rows.Scan(&dept, &dev); err != nil { - log.Fatal(err) - } - fmt.Printf("dept=%d stddev=%f\n", dept, dev) - } - if err := rows.Err(); err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go deleted file mode 100644 index 6023181..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go +++ /dev/null @@ -1,78 +0,0 @@ -package main - -import ( - "database/sql" - "log" - "os" - - "github.com/mattn/go-sqlite3" -) - -func main() { - sqlite3conn := []*sqlite3.SQLiteConn{} - sql.Register("sqlite3_with_hook_example", - &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - sqlite3conn = append(sqlite3conn, conn) - conn.RegisterUpdateHook(func(op int, db string, table string, rowid int64) { - switch op { - case sqlite3.SQLITE_INSERT: - log.Println("Notified of insert on db", db, "table", table, "rowid", rowid) - } - }) - return nil - }, - }) - os.Remove("./foo.db") - os.Remove("./bar.db") - - srcDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db") - if err != nil { - log.Fatal(err) - } - defer srcDb.Close() - srcDb.Ping() - - _, err = srcDb.Exec("create table foo(id int, value text)") - if err != 
nil { - log.Fatal(err) - } - _, err = srcDb.Exec("insert into foo values(1, 'foo')") - if err != nil { - log.Fatal(err) - } - _, err = srcDb.Exec("insert into foo values(2, 'bar')") - if err != nil { - log.Fatal(err) - } - _, err = srcDb.Query("select * from foo") - if err != nil { - log.Fatal(err) - } - destDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db") - if err != nil { - log.Fatal(err) - } - defer destDb.Close() - destDb.Ping() - - bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main") - if err != nil { - log.Fatal(err) - } - - _, err = bk.Step(-1) - if err != nil { - log.Fatal(err) - } - _, err = destDb.Query("select * from foo") - if err != nil { - log.Fatal(err) - } - _, err = destDb.Exec("insert into foo values(3, 'bar')") - if err != nil { - log.Fatal(err) - } - - bk.Finish() -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go b/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go deleted file mode 100644 index 4e4b897..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go +++ /dev/null @@ -1,113 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - "strings" - - "github.com/mattn/go-sqlite3" -) - -func createBulkInsertQuery(n int, start int) (query string, args []interface{}) { - values := make([]string, n) - args = make([]interface{}, n*2) - pos := 0 - for i := 0; i < n; i++ { - values[i] = "(?, ?)" - args[pos] = start + i - args[pos+1] = fmt.Sprintf("こんにちわ世界%03d", i) - pos += 2 - } - query = fmt.Sprintf( - "insert into foo(id, name) values %s", - strings.Join(values, ", "), - ) - return -} - -func bukInsert(db *sql.DB, query string, args []interface{}) (err error) { - stmt, err := db.Prepare(query) - if err != nil { - return - } - - _, err = stmt.Exec(args...) 
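// For reference (not from the original example): this Exec is where the limit
// being demonstrated can bite. The prepared statement binds n*2 "?" parameters,
// and once main lowers SQLITE_LIMIT_VARIABLE_NUMBER below that count, the
// second bulk insert is expected to fail with a "too many SQL variables" style
// error.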
- if err != nil { - return - } - - return -} - -func main() { - var sqlite3conn *sqlite3.SQLiteConn - sql.Register("sqlite3_with_limit", &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - sqlite3conn = conn - return nil - }, - }) - - os.Remove("./foo.db") - db, err := sql.Open("sqlite3_with_limit", "./foo.db") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - sqlStmt := ` - create table foo (id integer not null primary key, name text); - delete from foo; - ` - _, err = db.Exec(sqlStmt) - if err != nil { - log.Printf("%q: %s\n", err, sqlStmt) - return - } - - if sqlite3conn == nil { - log.Fatal("not set sqlite3 connection") - } - - limitVariableNumber := sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER) - log.Printf("default SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber) - - num := 400 - query, args := createBulkInsertQuery(num, 0) - err = bukInsert(db, query, args) - if err != nil { - log.Fatal(err) - } - - smallLimitVariableNumber := 100 - sqlite3conn.SetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER, smallLimitVariableNumber) - - limitVariableNumber = sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER) - log.Printf("updated SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber) - - query, args = createBulkInsertQuery(num, num) - err = bukInsert(db, query, args) - if err != nil { - if err != nil { - log.Printf("expect failed since SQLITE_LIMIT_VARIABLE_NUMBER is too small: %v", err) - } - } - - bigLimitVariableNumber := 999999 - sqlite3conn.SetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER, bigLimitVariableNumber) - limitVariableNumber = sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER) - log.Printf("set SQLITE_LIMIT_VARIABLE_NUMBER: %d", bigLimitVariableNumber) - log.Printf("updated SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber) - - query, args = createBulkInsertQuery(500, num+num) - err = bukInsert(db, query, args) - if err != nil { - if err != nil { - log.Fatal(err) - } - } - - log.Println("no error if SQLITE_LIMIT_VARIABLE_NUMBER > 999") -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile deleted file mode 100644 index 97b1e0f..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -ifeq ($(OS),Windows_NT) -EXE=extension.exe -EXT=sqlite3_mod_regexp.dll -RM=cmd /c del -LDFLAG= -else -EXE=extension -EXT=sqlite3_mod_regexp.so -RM=rm -LDFLAG=-fPIC -endif - -all : $(EXE) $(EXT) - -$(EXE) : extension.go - go build $< - -$(EXT) : sqlite3_mod_regexp.c - gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre - -clean : - @-$(RM) $(EXE) $(EXT) diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go deleted file mode 100644 index 61ceb55..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "github.com/mattn/go-sqlite3" - "log" -) - -func main() { - sql.Register("sqlite3_with_extensions", - &sqlite3.SQLiteDriver{ - Extensions: []string{ - "sqlite3_mod_regexp", - }, - }) - - db, err := sql.Open("sqlite3_with_extensions", ":memory:") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - // Force db to make a new connection in pool - // by putting the original in a transaction - tx, err := db.Begin() - if err != nil { - log.Fatal(err) - } - defer tx.Commit() - - // New 
connection works (hopefully!) - rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'") - if err != nil { - log.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var helloworld string - rows.Scan(&helloworld) - fmt.Println(helloworld) - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c deleted file mode 100644 index 277764d..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c +++ /dev/null @@ -1,31 +0,0 @@ -#include -#include -#include -#include - -SQLITE_EXTENSION_INIT1 -static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { - if (argc >= 2) { - const char *target = (const char *)sqlite3_value_text(argv[1]); - const char *pattern = (const char *)sqlite3_value_text(argv[0]); - const char* errstr = NULL; - int erroff = 0; - int vec[500]; - int n, rc; - pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); - rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); - if (rc <= 0) { - sqlite3_result_error(context, errstr, 0); - return; - } - sqlite3_result_int(context, 1); - } -} - -#ifdef _WIN32 -__declspec(dllexport) -#endif -int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) { - SQLITE_EXTENSION_INIT2(api); - return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL); -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile deleted file mode 100644 index cdd4853..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -ifeq ($(OS),Windows_NT) -EXE=extension.exe -EXT=sqlite3_mod_vtable.dll -RM=cmd /c del -LIBCURL=-lcurldll -LDFLAG= -else -EXE=extension -EXT=sqlite3_mod_vtable.so -RM=rm -LDFLAG=-fPIC -LIBCURL=-lcurl -endif - -all : $(EXE) $(EXT) - -$(EXE) : extension.go - go build $< - -$(EXT) : sqlite3_mod_vtable.cc - g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL) - -clean : - @-$(RM) $(EXE) $(EXT) diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go deleted file mode 100644 index f738af6..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go +++ /dev/null @@ -1,37 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - - "github.com/mattn/go-sqlite3" -) - -func main() { - sql.Register("sqlite3_with_extensions", - &sqlite3.SQLiteDriver{ - Extensions: []string{ - "sqlite3_mod_vtable", - }, - }) - - db, err := sql.Open("sqlite3_with_extensions", ":memory:") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - db.Exec("create virtual table repo using github(id, full_name, description, html_url)") - - rows, err := db.Query("select id, full_name, description, html_url from repo") - if err != nil { - log.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var id, fullName, description, htmlURL string - rows.Scan(&id, &fullName, &description, &htmlURL) - fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL) - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h deleted file mode 100644 index 2142647..0000000 --- 
a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h +++ /dev/null @@ -1,1040 +0,0 @@ -/* - * Copyright 2009-2010 Cybozu Labs, Inc. - * Copyright 2011 Kazuho Oku - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY CYBOZU LABS, INC. ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO - * EVENT SHALL CYBOZU LABS, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Cybozu Labs, Inc. - * - */ -#ifndef picojson_h -#define picojson_h - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef _MSC_VER - #define SNPRINTF _snprintf_s - #pragma warning(push) - #pragma warning(disable : 4244) // conversion from int to char -#else - #define SNPRINTF snprintf -#endif - -namespace picojson { - - enum { - null_type, - boolean_type, - number_type, - string_type, - array_type, - object_type - }; - - struct null {}; - - class value { - public: - typedef std::vector array; - typedef std::map object; - union _storage { - bool boolean_; - double number_; - std::string* string_; - array* array_; - object* object_; - }; - protected: - int type_; - _storage u_; - public: - value(); - value(int type, bool); - explicit value(bool b); - explicit value(double n); - explicit value(const std::string& s); - explicit value(const array& a); - explicit value(const object& o); - explicit value(const char* s); - value(const char* s, size_t len); - ~value(); - value(const value& x); - value& operator=(const value& x); - void swap(value& x); - template bool is() const; - template const T& get() const; - template T& get(); - bool evaluate_as_boolean() const; - const value& get(size_t idx) const; - const value& get(const std::string& key) const; - bool contains(size_t idx) const; - bool contains(const std::string& key) const; - std::string to_str() const; - template void serialize(Iter os) const; - std::string serialize() const; - private: - template value(const T*); // intentionally defined to block implicit conversion of pointer to bool - }; - - typedef value::array array; - typedef value::object object; - - inline value::value() : type_(null_type) {} - - inline value::value(int type, bool) : type_(type) { - switch (type) { -#define INIT(p, v) case p##type: u_.p = v; break - 
INIT(boolean_, false); - INIT(number_, 0.0); - INIT(string_, new std::string()); - INIT(array_, new array()); - INIT(object_, new object()); -#undef INIT - default: break; - } - } - - inline value::value(bool b) : type_(boolean_type) { - u_.boolean_ = b; - } - - inline value::value(double n) : type_(number_type) { - u_.number_ = n; - } - - inline value::value(const std::string& s) : type_(string_type) { - u_.string_ = new std::string(s); - } - - inline value::value(const array& a) : type_(array_type) { - u_.array_ = new array(a); - } - - inline value::value(const object& o) : type_(object_type) { - u_.object_ = new object(o); - } - - inline value::value(const char* s) : type_(string_type) { - u_.string_ = new std::string(s); - } - - inline value::value(const char* s, size_t len) : type_(string_type) { - u_.string_ = new std::string(s, len); - } - - inline value::~value() { - switch (type_) { -#define DEINIT(p) case p##type: delete u_.p; break - DEINIT(string_); - DEINIT(array_); - DEINIT(object_); -#undef DEINIT - default: break; - } - } - - inline value::value(const value& x) : type_(x.type_) { - switch (type_) { -#define INIT(p, v) case p##type: u_.p = v; break - INIT(string_, new std::string(*x.u_.string_)); - INIT(array_, new array(*x.u_.array_)); - INIT(object_, new object(*x.u_.object_)); -#undef INIT - default: - u_ = x.u_; - break; - } - } - - inline value& value::operator=(const value& x) { - if (this != &x) { - this->~value(); - new (this) value(x); - } - return *this; - } - - inline void value::swap(value& x) { - std::swap(type_, x.type_); - std::swap(u_, x.u_); - } - -#define IS(ctype, jtype) \ - template <> inline bool value::is() const { \ - return type_ == jtype##_type; \ - } - IS(null, null) - IS(bool, boolean) - IS(int, number) - IS(double, number) - IS(std::string, string) - IS(array, array) - IS(object, object) -#undef IS - -#define GET(ctype, var) \ - template <> inline const ctype& value::get() const { \ - assert("type mismatch! call vis() before get()" \ - && is()); \ - return var; \ - } \ - template <> inline ctype& value::get() { \ - assert("type mismatch! call is() before get()" \ - && is()); \ - return var; \ - } - GET(bool, u_.boolean_) - GET(double, u_.number_) - GET(std::string, *u_.string_) - GET(array, *u_.array_) - GET(object, *u_.object_) -#undef GET - - inline bool value::evaluate_as_boolean() const { - switch (type_) { - case null_type: - return false; - case boolean_type: - return u_.boolean_; - case number_type: - return u_.number_ != 0; - case string_type: - return ! u_.string_->empty(); - default: - return true; - } - } - - inline const value& value::get(size_t idx) const { - static value s_null; - assert(is()); - return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null; - } - - inline const value& value::get(const std::string& key) const { - static value s_null; - assert(is()); - object::const_iterator i = u_.object_->find(key); - return i != u_.object_->end() ? i->second : s_null; - } - - inline bool value::contains(size_t idx) const { - assert(is()); - return idx < u_.array_->size(); - } - - inline bool value::contains(const std::string& key) const { - assert(is()); - object::const_iterator i = u_.object_->find(key); - return i != u_.object_->end(); - } - - inline std::string value::to_str() const { - switch (type_) { - case null_type: return "null"; - case boolean_type: return u_.boolean_ ? 
"true" : "false"; - case number_type: { - char buf[256]; - double tmp; - SNPRINTF(buf, sizeof(buf), fabs(u_.number_) < (1ULL << 53) && modf(u_.number_, &tmp) == 0 ? "%.f" : "%.17g", u_.number_); - return buf; - } - case string_type: return *u_.string_; - case array_type: return "array"; - case object_type: return "object"; - default: assert(0); -#ifdef _MSC_VER - __assume(0); -#endif - } - return std::string(); - } - - template void copy(const std::string& s, Iter oi) { - std::copy(s.begin(), s.end(), oi); - } - - template void serialize_str(const std::string& s, Iter oi) { - *oi++ = '"'; - for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) { - switch (*i) { -#define MAP(val, sym) case val: copy(sym, oi); break - MAP('"', "\\\""); - MAP('\\', "\\\\"); - MAP('/', "\\/"); - MAP('\b', "\\b"); - MAP('\f', "\\f"); - MAP('\n', "\\n"); - MAP('\r', "\\r"); - MAP('\t', "\\t"); -#undef MAP - default: - if ((unsigned char)*i < 0x20 || *i == 0x7f) { - char buf[7]; - SNPRINTF(buf, sizeof(buf), "\\u%04x", *i & 0xff); - copy(buf, buf + 6, oi); - } else { - *oi++ = *i; - } - break; - } - } - *oi++ = '"'; - } - - template void value::serialize(Iter oi) const { - switch (type_) { - case string_type: - serialize_str(*u_.string_, oi); - break; - case array_type: { - *oi++ = '['; - for (array::const_iterator i = u_.array_->begin(); - i != u_.array_->end(); - ++i) { - if (i != u_.array_->begin()) { - *oi++ = ','; - } - i->serialize(oi); - } - *oi++ = ']'; - break; - } - case object_type: { - *oi++ = '{'; - for (object::const_iterator i = u_.object_->begin(); - i != u_.object_->end(); - ++i) { - if (i != u_.object_->begin()) { - *oi++ = ','; - } - serialize_str(i->first, oi); - *oi++ = ':'; - i->second.serialize(oi); - } - *oi++ = '}'; - break; - } - default: - copy(to_str(), oi); - break; - } - } - - inline std::string value::serialize() const { - std::string s; - serialize(std::back_inserter(s)); - return s; - } - - template class input { - protected: - Iter cur_, end_; - int last_ch_; - bool ungot_; - int line_; - public: - input(const Iter& first, const Iter& last) : cur_(first), end_(last), last_ch_(-1), ungot_(false), line_(1) {} - int getc() { - if (ungot_) { - ungot_ = false; - return last_ch_; - } - if (cur_ == end_) { - last_ch_ = -1; - return -1; - } - if (last_ch_ == '\n') { - line_++; - } - last_ch_ = *cur_++ & 0xff; - return last_ch_; - } - void ungetc() { - if (last_ch_ != -1) { - assert(! ungot_); - ungot_ = true; - } - } - Iter cur() const { return cur_; } - int line() const { return line_; } - void skip_ws() { - while (1) { - int ch = getc(); - if (! 
(ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) { - ungetc(); - break; - } - } - } - bool expect(int expect) { - skip_ws(); - if (getc() != expect) { - ungetc(); - return false; - } - return true; - } - bool match(const std::string& pattern) { - for (std::string::const_iterator pi(pattern.begin()); - pi != pattern.end(); - ++pi) { - if (getc() != *pi) { - ungetc(); - return false; - } - } - return true; - } - }; - - template inline int _parse_quadhex(input &in) { - int uni_ch = 0, hex; - for (int i = 0; i < 4; i++) { - if ((hex = in.getc()) == -1) { - return -1; - } - if ('0' <= hex && hex <= '9') { - hex -= '0'; - } else if ('A' <= hex && hex <= 'F') { - hex -= 'A' - 0xa; - } else if ('a' <= hex && hex <= 'f') { - hex -= 'a' - 0xa; - } else { - in.ungetc(); - return -1; - } - uni_ch = uni_ch * 16 + hex; - } - return uni_ch; - } - - template inline bool _parse_codepoint(String& out, input& in) { - int uni_ch; - if ((uni_ch = _parse_quadhex(in)) == -1) { - return false; - } - if (0xd800 <= uni_ch && uni_ch <= 0xdfff) { - if (0xdc00 <= uni_ch) { - // a second 16-bit of a surrogate pair appeared - return false; - } - // first 16-bit of surrogate pair, get the next one - if (in.getc() != '\\' || in.getc() != 'u') { - in.ungetc(); - return false; - } - int second = _parse_quadhex(in); - if (! (0xdc00 <= second && second <= 0xdfff)) { - return false; - } - uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff); - uni_ch += 0x10000; - } - if (uni_ch < 0x80) { - out.push_back(uni_ch); - } else { - if (uni_ch < 0x800) { - out.push_back(0xc0 | (uni_ch >> 6)); - } else { - if (uni_ch < 0x10000) { - out.push_back(0xe0 | (uni_ch >> 12)); - } else { - out.push_back(0xf0 | (uni_ch >> 18)); - out.push_back(0x80 | ((uni_ch >> 12) & 0x3f)); - } - out.push_back(0x80 | ((uni_ch >> 6) & 0x3f)); - } - out.push_back(0x80 | (uni_ch & 0x3f)); - } - return true; - } - - template inline bool _parse_string(String& out, input& in) { - while (1) { - int ch = in.getc(); - if (ch < ' ') { - in.ungetc(); - return false; - } else if (ch == '"') { - return true; - } else if (ch == '\\') { - if ((ch = in.getc()) == -1) { - return false; - } - switch (ch) { -#define MAP(sym, val) case sym: out.push_back(val); break - MAP('"', '\"'); - MAP('\\', '\\'); - MAP('/', '/'); - MAP('b', '\b'); - MAP('f', '\f'); - MAP('n', '\n'); - MAP('r', '\r'); - MAP('t', '\t'); -#undef MAP - case 'u': - if (! _parse_codepoint(out, in)) { - return false; - } - break; - default: - return false; - } - } else { - out.push_back(ch); - } - } - return false; - } - - template inline bool _parse_array(Context& ctx, input& in) { - if (! ctx.parse_array_start()) { - return false; - } - size_t idx = 0; - if (in.expect(']')) { - return ctx.parse_array_stop(idx); - } - do { - if (! ctx.parse_array_item(in, idx)) { - return false; - } - idx++; - } while (in.expect(',')); - return in.expect(']') && ctx.parse_array_stop(idx); - } - - template inline bool _parse_object(Context& ctx, input& in) { - if (! ctx.parse_object_start()) { - return false; - } - if (in.expect('}')) { - return true; - } - do { - std::string key; - if (! in.expect('"') - || ! _parse_string(key, in) - || ! in.expect(':')) { - return false; - } - if (! ctx.parse_object_item(in, key)) { - return false; - } - } while (in.expect(',')); - return in.expect('}'); - } - - template inline bool _parse_number(double& out, input& in) { - std::string num_str; - while (1) { - int ch = in.getc(); - if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == '.' 
- || ch == 'e' || ch == 'E') { - num_str.push_back(ch); - } else { - in.ungetc(); - break; - } - } - char* endp; - out = strtod(num_str.c_str(), &endp); - return endp == num_str.c_str() + num_str.size(); - } - - template inline bool _parse(Context& ctx, input& in) { - in.skip_ws(); - int ch = in.getc(); - switch (ch) { -#define IS(ch, text, op) case ch: \ - if (in.match(text) && op) { \ - return true; \ - } else { \ - return false; \ - } - IS('n', "ull", ctx.set_null()); - IS('f', "alse", ctx.set_bool(false)); - IS('t', "rue", ctx.set_bool(true)); -#undef IS - case '"': - return ctx.parse_string(in); - case '[': - return _parse_array(ctx, in); - case '{': - return _parse_object(ctx, in); - default: - if (('0' <= ch && ch <= '9') || ch == '-') { - in.ungetc(); - double f; - if (_parse_number(f, in)) { - ctx.set_number(f); - return true; - } else { - return false; - } - } - break; - } - in.ungetc(); - return false; - } - - class deny_parse_context { - public: - bool set_null() { return false; } - bool set_bool(bool) { return false; } - bool set_number(double) { return false; } - template bool parse_string(input&) { return false; } - bool parse_array_start() { return false; } - template bool parse_array_item(input&, size_t) { - return false; - } - bool parse_array_stop(size_t) { return false; } - bool parse_object_start() { return false; } - template bool parse_object_item(input&, const std::string&) { - return false; - } - }; - - class default_parse_context { - protected: - value* out_; - public: - default_parse_context(value* out) : out_(out) {} - bool set_null() { - *out_ = value(); - return true; - } - bool set_bool(bool b) { - *out_ = value(b); - return true; - } - bool set_number(double f) { - *out_ = value(f); - return true; - } - template bool parse_string(input& in) { - *out_ = value(string_type, false); - return _parse_string(out_->get(), in); - } - bool parse_array_start() { - *out_ = value(array_type, false); - return true; - } - template bool parse_array_item(input& in, size_t) { - array& a = out_->get(); - a.push_back(value()); - default_parse_context ctx(&a.back()); - return _parse(ctx, in); - } - bool parse_array_stop(size_t) { return true; } - bool parse_object_start() { - *out_ = value(object_type, false); - return true; - } - template bool parse_object_item(input& in, const std::string& key) { - object& o = out_->get(); - default_parse_context ctx(&o[key]); - return _parse(ctx, in); - } - private: - default_parse_context(const default_parse_context&); - default_parse_context& operator=(const default_parse_context&); - }; - - class null_parse_context { - public: - struct dummy_str { - void push_back(int) {} - }; - public: - null_parse_context() {} - bool set_null() { return true; } - bool set_bool(bool) { return true; } - bool set_number(double) { return true; } - template bool parse_string(input& in) { - dummy_str s; - return _parse_string(s, in); - } - bool parse_array_start() { return true; } - template bool parse_array_item(input& in, size_t) { - return _parse(*this, in); - } - bool parse_array_stop(size_t) { return true; } - bool parse_object_start() { return true; } - template bool parse_object_item(input& in, const std::string&) { - return _parse(*this, in); - } - private: - null_parse_context(const null_parse_context&); - null_parse_context& operator=(const null_parse_context&); - }; - - // obsolete, use the version below - template inline std::string parse(value& out, Iter& pos, const Iter& last) { - std::string err; - pos = parse(out, pos, last, &err); - return 
err; - } - - template inline Iter _parse(Context& ctx, const Iter& first, const Iter& last, std::string* err) { - input in(first, last); - if (! _parse(ctx, in) && err != NULL) { - char buf[64]; - SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line()); - *err = buf; - while (1) { - int ch = in.getc(); - if (ch == -1 || ch == '\n') { - break; - } else if (ch >= ' ') { - err->push_back(ch); - } - } - } - return in.cur(); - } - - template inline Iter parse(value& out, const Iter& first, const Iter& last, std::string* err) { - default_parse_context ctx(&out); - return _parse(ctx, first, last, err); - } - - inline std::string parse(value& out, std::istream& is) { - std::string err; - parse(out, std::istreambuf_iterator(is.rdbuf()), - std::istreambuf_iterator(), &err); - return err; - } - - template struct last_error_t { - static std::string s; - }; - template std::string last_error_t::s; - - inline void set_last_error(const std::string& s) { - last_error_t::s = s; - } - - inline const std::string& get_last_error() { - return last_error_t::s; - } - - inline bool operator==(const value& x, const value& y) { - if (x.is()) - return y.is(); -#define PICOJSON_CMP(type) \ - if (x.is()) \ - return y.is() && x.get() == y.get() - PICOJSON_CMP(bool); - PICOJSON_CMP(double); - PICOJSON_CMP(std::string); - PICOJSON_CMP(array); - PICOJSON_CMP(object); -#undef PICOJSON_CMP - assert(0); -#ifdef _MSC_VER - __assume(0); -#endif - return false; - } - - inline bool operator!=(const value& x, const value& y) { - return ! (x == y); - } -} - -namespace std { - template<> inline void swap(picojson::value& x, picojson::value& y) - { - x.swap(y); - } -} - -inline std::istream& operator>>(std::istream& is, picojson::value& x) -{ - picojson::set_last_error(std::string()); - std::string err = picojson::parse(x, is); - if (! err.empty()) { - picojson::set_last_error(err); - is.setstate(std::ios::failbit); - } - return is; -} - -inline std::ostream& operator<<(std::ostream& os, const picojson::value& x) -{ - x.serialize(std::ostream_iterator(os)); - return os; -} -#ifdef _MSC_VER - #pragma warning(pop) -#endif - -#endif -#ifdef TEST_PICOJSON -#ifdef _MSC_VER - #pragma warning(disable : 4127) // conditional expression is constant -#endif - -using namespace std; - -static void plan(int num) -{ - printf("1..%d\n", num); -} - -static bool success = true; - -static void ok(bool b, const char* name = "") -{ - static int n = 1; - if (! b) - success = false; - printf("%s %d - %s\n", b ? 
"ok" : "ng", n++, name); -} - -template void is(const T& x, const T& y, const char* name = "") -{ - if (x == y) { - ok(true, name); - } else { - ok(false, name); - } -} - -#include -#include -#include -#include - -int main(void) -{ - plan(85); - - // constructors -#define TEST(expr, expected) \ - is(picojson::value expr .serialize(), string(expected), "picojson::value" #expr) - - TEST( (true), "true"); - TEST( (false), "false"); - TEST( (42.0), "42"); - TEST( (string("hello")), "\"hello\""); - TEST( ("hello"), "\"hello\""); - TEST( ("hello", 4), "\"hell\""); - - { - double a = 1; - for (int i = 0; i < 1024; i++) { - picojson::value vi(a); - std::stringstream ss; - ss << vi; - picojson::value vo; - ss >> vo; - double b = vo.get(); - if ((i < 53 && a != b) || fabs(a - b) / b > 1e-8) { - printf("ng i=%d a=%.18e b=%.18e\n", i, a, b); - } - a *= 2; - } - } - -#undef TEST - -#define TEST(in, type, cmp, serialize_test) { \ - picojson::value v; \ - const char* s = in; \ - string err = picojson::parse(v, s, s + strlen(s)); \ - ok(err.empty(), in " no error"); \ - ok(v.is(), in " check type"); \ - is(v.get(), cmp, in " correct output"); \ - is(*s, '\0', in " read to eof"); \ - if (serialize_test) { \ - is(v.serialize(), string(in), in " serialize"); \ - } \ - } - TEST("false", bool, false, true); - TEST("true", bool, true, true); - TEST("90.5", double, 90.5, false); - TEST("1.7976931348623157e+308", double, DBL_MAX, false); - TEST("\"hello\"", string, string("hello"), true); - TEST("\"\\\"\\\\\\/\\b\\f\\n\\r\\t\"", string, string("\"\\/\b\f\n\r\t"), - true); - TEST("\"\\u0061\\u30af\\u30ea\\u30b9\"", string, - string("a\xe3\x82\xaf\xe3\x83\xaa\xe3\x82\xb9"), false); - TEST("\"\\ud840\\udc0b\"", string, string("\xf0\xa0\x80\x8b"), false); -#undef TEST - -#define TEST(type, expr) { \ - picojson::value v; \ - const char *s = expr; \ - string err = picojson::parse(v, s, s + strlen(s)); \ - ok(err.empty(), "empty " #type " no error"); \ - ok(v.is(), "empty " #type " check type"); \ - ok(v.get().empty(), "check " #type " array size"); \ - } - TEST(array, "[]"); - TEST(object, "{}"); -#undef TEST - - { - picojson::value v; - const char *s = "[1,true,\"hello\"]"; - string err = picojson::parse(v, s, s + strlen(s)); - ok(err.empty(), "array no error"); - ok(v.is(), "array check type"); - is(v.get().size(), size_t(3), "check array size"); - ok(v.contains(0), "check contains array[0]"); - ok(v.get(0).is(), "check array[0] type"); - is(v.get(0).get(), 1.0, "check array[0] value"); - ok(v.contains(1), "check contains array[1]"); - ok(v.get(1).is(), "check array[1] type"); - ok(v.get(1).get(), "check array[1] value"); - ok(v.contains(2), "check contains array[2]"); - ok(v.get(2).is(), "check array[2] type"); - is(v.get(2).get(), string("hello"), "check array[2] value"); - ok(!v.contains(3), "check not contains array[3]"); - } - - { - picojson::value v; - const char *s = "{ \"a\": true }"; - string err = picojson::parse(v, s, s + strlen(s)); - ok(err.empty(), "object no error"); - ok(v.is(), "object check type"); - is(v.get().size(), size_t(1), "check object size"); - ok(v.contains("a"), "check contains property"); - ok(v.get("a").is(), "check bool property exists"); - is(v.get("a").get(), true, "check bool property value"); - is(v.serialize(), string("{\"a\":true}"), "serialize object"); - ok(!v.contains("z"), "check not contains property"); - } - -#define TEST(json, msg) do { \ - picojson::value v; \ - const char *s = json; \ - string err = picojson::parse(v, s, s + strlen(s)); \ - is(err, string("syntax error 
at line " msg), msg); \ - } while (0) - TEST("falsoa", "1 near: oa"); - TEST("{]", "1 near: ]"); - TEST("\n\bbell", "2 near: bell"); - TEST("\"abc\nd\"", "1 near: "); -#undef TEST - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - s = "{ \"d\": 2.0, \"b\": true, \"a\": [1,2,\"three\"] }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 == v2), "check == operator in deep comparison"); - } - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - s = "{ \"d\": 2.0, \"a\": [1,\"three\"], \"b\": true }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 != v2), "check != operator for array in deep comparison"); - } - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - s = "{ \"d\": 2.0, \"a\": [1,2,\"three\"], \"b\": false }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 != v2), "check != operator for object in deep comparison"); - } - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - picojson::object& o = v1.get(); - o.erase("b"); - picojson::array& a = o["a"].get(); - picojson::array::iterator i; - i = std::remove(a.begin(), a.end(), picojson::value(std::string("three"))); - a.erase(i, a.end()); - s = "{ \"a\": [1,2], \"d\": 2 }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 == v2), "check erase()"); - } - - ok(picojson::value(3.0).serialize() == "3", - "integral number should be serialized as a integer"); - - { - const char* s = "{ \"a\": [1,2], \"d\": 2 }"; - picojson::null_parse_context ctx; - string err; - picojson::_parse(ctx, s, s + strlen(s), &err); - ok(err.empty(), "null_parse_context"); - } - - { - picojson::value v1, v2; - v1 = picojson::value(true); - swap(v1, v2); - ok(v1.is(), "swap (null)"); - ok(v2.get() == true, "swap (bool)"); - - v1 = picojson::value("a"); - v2 = picojson::value(1.0); - swap(v1, v2); - ok(v1.get() == 1.0, "swap (dobule)"); - ok(v2.get() == "a", "swap (string)"); - - v1 = picojson::value(picojson::object()); - v2 = picojson::value(picojson::array()); - swap(v1, v2); - ok(v1.is(), "swap (array)"); - ok(v2.is(), "swap (object)"); - } - - return success ? 
0 : 1; -} - -#endif diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc deleted file mode 100644 index 5bd4e66..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc +++ /dev/null @@ -1,238 +0,0 @@ -#include -#include -#include -#include -#include -#include "picojson.h" - -#ifdef _WIN32 -# define EXPORT __declspec(dllexport) -#else -# define EXPORT -#endif - -SQLITE_EXTENSION_INIT1; - -typedef struct { - char* data; // response data from server - size_t size; // response size of data -} MEMFILE; - -MEMFILE* -memfopen() { - MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE)); - if (mf) { - mf->data = NULL; - mf->size = 0; - } - return mf; -} - -void -memfclose(MEMFILE* mf) { - if (mf->data) free(mf->data); - free(mf); -} - -size_t -memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) { - MEMFILE* mf = (MEMFILE*) stream; - int block = size * nmemb; - if (!mf) return block; // through - if (!mf->data) - mf->data = (char*) malloc(block); - else - mf->data = (char*) realloc(mf->data, mf->size + block); - if (mf->data) { - memcpy(mf->data + mf->size, ptr, block); - mf->size += block; - } - return block; -} - -char* -memfstrdup(MEMFILE* mf) { - char* buf; - if (mf->size == 0) return NULL; - buf = (char*) malloc(mf->size + 1); - memcpy(buf, mf->data, mf->size); - buf[mf->size] = 0; - return buf; -} - -static int -my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) { - std::stringstream ss; - ss << "CREATE TABLE " << argv[0] - << "(id int, full_name text, description text, html_url text)"; - int rc = sqlite3_declare_vtab(db, ss.str().c_str()); - *ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab)); - memset(*ppVTab, 0, sizeof(sqlite3_vtab)); - return rc; -} - -static int -my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) { - return my_connect(db, pAux, argc, argv, ppVTab, c); -} - -static int my_disconnect(sqlite3_vtab *pVTab) { - sqlite3_free(pVTab); - return SQLITE_OK; -} - -static int -my_destroy(sqlite3_vtab *pVTab) { - sqlite3_free(pVTab); - return SQLITE_OK; -} - -typedef struct { - sqlite3_vtab_cursor base; - int index; - picojson::value* rows; -} cursor; - -static int -my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) { - MEMFILE* mf; - CURL* curl; - char* json; - CURLcode res = CURLE_OK; - char error[CURL_ERROR_SIZE] = {0}; - char* cert_file = getenv("SSL_CERT_FILE"); - - mf = memfopen(); - curl = curl_easy_init(); - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1); - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2); - curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0"); - curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories"); - if (cert_file) - curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file); - curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); - curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite); - res = curl_easy_perform(curl); - curl_easy_cleanup(curl); - if (res != CURLE_OK) { - std::cerr << error << std::endl; - return SQLITE_FAIL; - } - - picojson::value* v = new picojson::value; - std::string err; - picojson::parse(*v, mf->data, mf->data + mf->size, &err); - memfclose(mf); - - if (!err.empty()) { - delete v; - std::cerr << err << std::endl; - return 
SQLITE_FAIL; - } - - cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor)); - c->rows = v; - c->index = 0; - *ppCursor = &c->base; - return SQLITE_OK; -} - -static int -my_close(cursor *c) { - delete c->rows; - sqlite3_free(c); - return SQLITE_OK; -} - -static int -my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) { - c->index = 0; - return SQLITE_OK; -} - -static int -my_next(cursor *c) { - c->index++; - return SQLITE_OK; -} - -static int -my_eof(cursor *c) { - return c->index >= c->rows->get().size() ? 1 : 0; -} - -static int -my_column(cursor *c, sqlite3_context *ctxt, int i) { - picojson::value v = c->rows->get()[c->index]; - picojson::object row = v.get(); - const char* p = NULL; - switch (i) { - case 0: - p = row["id"].to_str().c_str(); - break; - case 1: - p = row["full_name"].to_str().c_str(); - break; - case 2: - p = row["description"].to_str().c_str(); - break; - case 3: - p = row["html_url"].to_str().c_str(); - break; - } - sqlite3_result_text(ctxt, strdup(p), strlen(p), free); - return SQLITE_OK; -} - -static int -my_rowid(cursor *c, sqlite3_int64 *pRowid) { - *pRowid = c->index; - return SQLITE_OK; -} - -static int -my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) { - return SQLITE_OK; -} - -static const sqlite3_module module = { - 0, - my_create, - my_connect, - my_bestindex, - my_disconnect, - my_destroy, - my_open, - (int (*)(sqlite3_vtab_cursor *)) my_close, - (int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter, - (int (*)(sqlite3_vtab_cursor *)) my_next, - (int (*)(sqlite3_vtab_cursor *)) my_eof, - (int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column, - (int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid, - NULL, // my_update - NULL, // my_begin - NULL, // my_sync - NULL, // my_commit - NULL, // my_rollback - NULL, // my_findfunction - NULL, // my_rename -}; - -static void -destructor(void *arg) { - return; -} - - -extern "C" { - -EXPORT int -sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) { - SQLITE_EXTENSION_INIT2(api); - sqlite3_create_module_v2(db, "github", &module, NULL, destructor); - return 0; -} - -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go b/vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go deleted file mode 100644 index 261ed4d..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go +++ /dev/null @@ -1,106 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - _ "github.com/mattn/go-sqlite3" - "log" - "os" -) - -func main() { - os.Remove("./foo.db") - - db, err := sql.Open("sqlite3", "./foo.db") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - sqlStmt := ` - create table foo (id integer not null primary key, name text); - delete from foo; - ` - _, err = db.Exec(sqlStmt) - if err != nil { - log.Printf("%q: %s\n", err, sqlStmt) - return - } - - tx, err := db.Begin() - if err != nil { - log.Fatal(err) - } - stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)") - if err != nil { - log.Fatal(err) - } - defer stmt.Close() - for i := 0; i < 100; i++ { - _, err = stmt.Exec(i, fmt.Sprintf("こんにちわ世界%03d", i)) - if err != nil { - log.Fatal(err) - } - } - tx.Commit() - - rows, err := db.Query("select id, name from foo") - if err != nil { - log.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var id int - var name string - err = rows.Scan(&id, &name) - if err != nil { - log.Fatal(err) - } - fmt.Println(id, name) - } - err = rows.Err() - 
if err != nil { - log.Fatal(err) - } - - stmt, err = db.Prepare("select name from foo where id = ?") - if err != nil { - log.Fatal(err) - } - defer stmt.Close() - var name string - err = stmt.QueryRow("3").Scan(&name) - if err != nil { - log.Fatal(err) - } - fmt.Println(name) - - _, err = db.Exec("delete from foo") - if err != nil { - log.Fatal(err) - } - - _, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')") - if err != nil { - log.Fatal(err) - } - - rows, err = db.Query("select id, name from foo") - if err != nil { - log.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var id int - var name string - err = rows.Scan(&id, &name) - if err != nil { - log.Fatal(err) - } - fmt.Println(id, name) - } - err = rows.Err() - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/trace/main.go b/vendor/github.com/mattn/go-sqlite3/_example/trace/main.go deleted file mode 100644 index 9f83ee1..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/trace/main.go +++ /dev/null @@ -1,264 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - - sqlite3 "github.com/mattn/go-sqlite3" -) - -func traceCallback(info sqlite3.TraceInfo) int { - // Not very readable but may be useful; uncomment next line in case of doubt: - //fmt.Printf("Trace: %#v\n", info) - - var dbErrText string - if info.DBError.Code != 0 || info.DBError.ExtendedCode != 0 { - dbErrText = fmt.Sprintf("; DB error: %#v", info.DBError) - } else { - dbErrText = "." - } - - // Show the Statement-or-Trigger text in curly braces ('{', '}') - // since from the *paired* ASCII characters they are - // the least used in SQL syntax, therefore better visual delimiters. - // Maybe show 'ExpandedSQL' the same way as 'StmtOrTrigger'. - // - // A known use of curly braces (outside strings) is - // for ODBC escape sequences. Not likely to appear here. - // - // Template languages, etc. don't matter, we should see their *result* - // at *this* level. - // Strange curly braces in SQL code that reached the database driver - // suggest that there is a bug in the application. - // The braces are likely to be either template syntax or - // a programming language's string interpolation syntax. - - var expandedText string - if info.ExpandedSQL != "" { - if info.ExpandedSQL == info.StmtOrTrigger { - expandedText = " = exp" - } else { - expandedText = fmt.Sprintf(" expanded {%q}", info.ExpandedSQL) - } - } else { - expandedText = "" - } - - // SQLite docs as of September 6, 2016: Tracing and Profiling Functions - // https://www.sqlite.org/c3ref/profile.html - // - // The profile callback time is in units of nanoseconds, however - // the current implementation is only capable of millisecond resolution - // so the six least significant digits in the time are meaningless. - // Future versions of SQLite might provide greater resolution on the profiler callback. 
- - var runTimeText string - if info.RunTimeNanosec == 0 { - if info.EventCode == sqlite3.TraceProfile { - //runTimeText = "; no time" // seems confusing - runTimeText = "; time 0" // no measurement unit - } else { - //runTimeText = "; no time" // seems useless and confusing - } - } else { - const nanosPerMillisec = 1000000 - if info.RunTimeNanosec%nanosPerMillisec == 0 { - runTimeText = fmt.Sprintf("; time %d ms", info.RunTimeNanosec/nanosPerMillisec) - } else { - // unexpected: better than millisecond resolution - runTimeText = fmt.Sprintf("; time %d ns!!!", info.RunTimeNanosec) - } - } - - var modeText string - if info.AutoCommit { - modeText = "-AC-" - } else { - modeText = "+Tx+" - } - - fmt.Printf("Trace: ev %d %s conn 0x%x, stmt 0x%x {%q}%s%s%s\n", - info.EventCode, modeText, info.ConnHandle, info.StmtHandle, - info.StmtOrTrigger, expandedText, - runTimeText, - dbErrText) - return 0 -} - -func main() { - eventMask := sqlite3.TraceStmt | sqlite3.TraceProfile | sqlite3.TraceRow | sqlite3.TraceClose - - sql.Register("sqlite3_tracing", - &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - err := conn.SetTrace(&sqlite3.TraceConfig{ - Callback: traceCallback, - EventMask: uint(eventMask), - WantExpandedSQL: true, - }) - return err - }, - }) - - os.Exit(dbMain()) -} - -// Harder to do DB work in main(). -// It's better with a separate function because -// 'defer' and 'os.Exit' don't go well together. -// -// DO NOT use 'log.Fatal...' below: remember that it's equivalent to -// Print() followed by a call to os.Exit(1) --- and -// we want to avoid Exit() so 'defer' can do cleanup. -// Use 'log.Panic...' instead. - -func dbMain() int { - db, err := sql.Open("sqlite3_tracing", ":memory:") - if err != nil { - fmt.Printf("Failed to open database: %#+v\n", err) - return 1 - } - defer db.Close() - - err = db.Ping() - if err != nil { - log.Panic(err) - } - - dbSetup(db) - - dbDoInsert(db) - dbDoInsertPrepared(db) - dbDoSelect(db) - dbDoSelectPrepared(db) - - return 0 -} - -// 'DDL' stands for "Data Definition Language": - -// Note: "INTEGER PRIMARY KEY NOT NULL AUTOINCREMENT" causes the error -// 'near "AUTOINCREMENT": syntax error'; without "NOT NULL" it works. -const tableDDL = `CREATE TABLE t1 ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - note VARCHAR NOT NULL -)` - -// 'DML' stands for "Data Manipulation Language": - -const insertDML = "INSERT INTO t1 (note) VALUES (?)" -const selectDML = "SELECT id, note FROM t1 WHERE note LIKE ?" 
- -const textPrefix = "bla-1234567890-" -const noteTextPattern = "%Prep%" - -const nGenRows = 4 // Number of Rows to Generate (for *each* approach tested) - -func dbSetup(db *sql.DB) { - var err error - - _, err = db.Exec("DROP TABLE IF EXISTS t1") - if err != nil { - log.Panic(err) - } - _, err = db.Exec(tableDDL) - if err != nil { - log.Panic(err) - } -} - -func dbDoInsert(db *sql.DB) { - const Descr = "DB-Exec" - for i := 0; i < nGenRows; i++ { - result, err := db.Exec(insertDML, textPrefix+Descr) - if err != nil { - log.Panic(err) - } - - resultDoCheck(result, Descr, i) - } -} - -func dbDoInsertPrepared(db *sql.DB) { - const Descr = "DB-Prepare" - - stmt, err := db.Prepare(insertDML) - if err != nil { - log.Panic(err) - } - defer stmt.Close() - - for i := 0; i < nGenRows; i++ { - result, err := stmt.Exec(textPrefix + Descr) - if err != nil { - log.Panic(err) - } - - resultDoCheck(result, Descr, i) - } -} - -func resultDoCheck(result sql.Result, callerDescr string, callIndex int) { - lastID, err := result.LastInsertId() - if err != nil { - log.Panic(err) - } - nAffected, err := result.RowsAffected() - if err != nil { - log.Panic(err) - } - - log.Printf("Exec result for %s (%d): ID = %d, affected = %d\n", callerDescr, callIndex, lastID, nAffected) -} - -func dbDoSelect(db *sql.DB) { - const Descr = "DB-Query" - - rows, err := db.Query(selectDML, noteTextPattern) - if err != nil { - log.Panic(err) - } - defer rows.Close() - - rowsDoFetch(rows, Descr) -} - -func dbDoSelectPrepared(db *sql.DB) { - const Descr = "DB-Prepare" - - stmt, err := db.Prepare(selectDML) - if err != nil { - log.Panic(err) - } - defer stmt.Close() - - rows, err := stmt.Query(noteTextPattern) - if err != nil { - log.Panic(err) - } - defer rows.Close() - - rowsDoFetch(rows, Descr) -} - -func rowsDoFetch(rows *sql.Rows, callerDescr string) { - var nRows int - var id int64 - var note string - - for rows.Next() { - err := rows.Scan(&id, ¬e) - if err != nil { - log.Panic(err) - } - log.Printf("Row for %s (%d): id=%d, note=%q\n", - callerDescr, nRows, id, note) - nRows++ - } - if err := rows.Err(); err != nil { - log.Panic(err) - } - log.Printf("Total %d rows for %s.\n", nRows, callerDescr) -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go b/vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go deleted file mode 100644 index aad8dda..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - - "github.com/mattn/go-sqlite3" -) - -func main() { - sql.Register("sqlite3_with_extensions", &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - return conn.CreateModule("github", &githubModule{}) - }, - }) - db, err := sql.Open("sqlite3_with_extensions", ":memory:") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - _, err = db.Exec("create virtual table repo using github(id, full_name, description, html_url)") - if err != nil { - log.Fatal(err) - } - - rows, err := db.Query("select id, full_name, description, html_url from repo") - if err != nil { - log.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var id, fullName, description, htmlURL string - rows.Scan(&id, &fullName, &description, &htmlURL) - fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL) - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go b/vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go deleted file mode 100644 index 
1d6d824..0000000 --- a/vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go +++ /dev/null @@ -1,111 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - "github.com/mattn/go-sqlite3" -) - -type githubRepo struct { - ID int `json:"id"` - FullName string `json:"full_name"` - Description string `json:"description"` - HTMLURL string `json:"html_url"` -} - -type githubModule struct { -} - -func (m *githubModule) Create(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) { - err := c.DeclareVTab(fmt.Sprintf(` - CREATE TABLE %s ( - id INT, - full_name TEXT, - description TEXT, - html_url TEXT - )`, args[0])) - if err != nil { - return nil, err - } - return &ghRepoTable{}, nil -} - -func (m *githubModule) Connect(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) { - return m.Create(c, args) -} - -func (m *githubModule) DestroyModule() {} - -type ghRepoTable struct { - repos []githubRepo -} - -func (v *ghRepoTable) Open() (sqlite3.VTabCursor, error) { - resp, err := http.Get("https://api.github.com/repositories") - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - var repos []githubRepo - if err := json.Unmarshal(body, &repos); err != nil { - return nil, err - } - return &ghRepoCursor{0, repos}, nil -} - -func (v *ghRepoTable) BestIndex(cst []sqlite3.InfoConstraint, ob []sqlite3.InfoOrderBy) (*sqlite3.IndexResult, error) { - return &sqlite3.IndexResult{}, nil -} - -func (v *ghRepoTable) Disconnect() error { return nil } -func (v *ghRepoTable) Destroy() error { return nil } - -type ghRepoCursor struct { - index int - repos []githubRepo -} - -func (vc *ghRepoCursor) Column(c *sqlite3.SQLiteContext, col int) error { - switch col { - case 0: - c.ResultInt(vc.repos[vc.index].ID) - case 1: - c.ResultText(vc.repos[vc.index].FullName) - case 2: - c.ResultText(vc.repos[vc.index].Description) - case 3: - c.ResultText(vc.repos[vc.index].HTMLURL) - } - return nil -} - -func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []interface{}) error { - vc.index = 0 - return nil -} - -func (vc *ghRepoCursor) Next() error { - vc.index++ - return nil -} - -func (vc *ghRepoCursor) EOF() bool { - return vc.index >= len(vc.repos) -} - -func (vc *ghRepoCursor) Rowid() (int64, error) { - return int64(vc.index), nil -} - -func (vc *ghRepoCursor) Close() error { - return nil -} diff --git a/vendor/github.com/mattn/go-sqlite3/backup.go b/vendor/github.com/mattn/go-sqlite3/backup.go deleted file mode 100644 index 5ab3a54..0000000 --- a/vendor/github.com/mattn/go-sqlite3/backup.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package sqlite3 - -/* -#ifndef USE_LIBSQLITE3 -#include -#else -#include -#endif -#include -*/ -import "C" -import ( - "runtime" - "unsafe" -) - -// SQLiteBackup implement interface of Backup. -type SQLiteBackup struct { - b *C.sqlite3_backup -} - -// Backup make backup from src to dest. 
-func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) { - destptr := C.CString(dest) - defer C.free(unsafe.Pointer(destptr)) - srcptr := C.CString(src) - defer C.free(unsafe.Pointer(srcptr)) - - if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil { - bb := &SQLiteBackup{b: b} - runtime.SetFinalizer(bb, (*SQLiteBackup).Finish) - return bb, nil - } - return nil, c.lastError() -} - -// Step to backs up for one step. Calls the underlying `sqlite3_backup_step` -// function. This function returns a boolean indicating if the backup is done -// and an error signalling any other error. Done is returned if the underlying -// C function returns SQLITE_DONE (Code 101) -func (b *SQLiteBackup) Step(p int) (bool, error) { - ret := C.sqlite3_backup_step(b.b, C.int(p)) - if ret == C.SQLITE_DONE { - return true, nil - } else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY { - return false, Error{Code: ErrNo(ret)} - } - return false, nil -} - -// Remaining return whether have the rest for backup. -func (b *SQLiteBackup) Remaining() int { - return int(C.sqlite3_backup_remaining(b.b)) -} - -// PageCount return count of pages. -func (b *SQLiteBackup) PageCount() int { - return int(C.sqlite3_backup_pagecount(b.b)) -} - -// Finish close backup. -func (b *SQLiteBackup) Finish() error { - return b.Close() -} - -// Close close backup. -func (b *SQLiteBackup) Close() error { - ret := C.sqlite3_backup_finish(b.b) - - // sqlite3_backup_finish() never fails, it just returns the - // error code from previous operations, so clean up before - // checking and returning an error - b.b = nil - runtime.SetFinalizer(b, nil) - - if ret != 0 { - return Error{Code: ErrNo(ret)} - } - return nil -} diff --git a/vendor/github.com/mattn/go-sqlite3/backup_test.go b/vendor/github.com/mattn/go-sqlite3/backup_test.go deleted file mode 100644 index 73c0a4b..0000000 --- a/vendor/github.com/mattn/go-sqlite3/backup_test.go +++ /dev/null @@ -1,290 +0,0 @@ -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package sqlite3 - -import ( - "database/sql" - "fmt" - "os" - "testing" - "time" -) - -// The number of rows of test data to create in the source database. -// Can be used to control how many pages are available to be backed up. -const testRowCount = 100 - -// The maximum number of seconds after which the page-by-page backup is considered to have taken too long. -const usePagePerStepsTimeoutSeconds = 30 - -// Test the backup functionality. -func testBackup(t *testing.T, testRowCount int, usePerPageSteps bool) { - // This function will be called multiple times. - // It uses sql.Register(), which requires the name parameter value to be unique. - // There does not currently appear to be a way to unregister a registered driver, however. - // So generate a database driver name that will likely be unique. - var driverName = fmt.Sprintf("sqlite3_testBackup_%v_%v_%v", testRowCount, usePerPageSteps, time.Now().UnixNano()) - - // The driver's connection will be needed in order to perform the backup. - driverConns := []*SQLiteConn{} - sql.Register(driverName, &SQLiteDriver{ - ConnectHook: func(conn *SQLiteConn) error { - driverConns = append(driverConns, conn) - return nil - }, - }) - - // Connect to the source database. 
- srcTempFilename := TempFilename(t) - defer os.Remove(srcTempFilename) - srcDb, err := sql.Open(driverName, srcTempFilename) - if err != nil { - t.Fatal("Failed to open the source database:", err) - } - defer srcDb.Close() - err = srcDb.Ping() - if err != nil { - t.Fatal("Failed to connect to the source database:", err) - } - - // Connect to the destination database. - destTempFilename := TempFilename(t) - defer os.Remove(destTempFilename) - destDb, err := sql.Open(driverName, destTempFilename) - if err != nil { - t.Fatal("Failed to open the destination database:", err) - } - defer destDb.Close() - err = destDb.Ping() - if err != nil { - t.Fatal("Failed to connect to the destination database:", err) - } - - // Check the driver connections. - if len(driverConns) != 2 { - t.Fatalf("Expected 2 driver connections, but found %v.", len(driverConns)) - } - srcDbDriverConn := driverConns[0] - if srcDbDriverConn == nil { - t.Fatal("The source database driver connection is nil.") - } - destDbDriverConn := driverConns[1] - if destDbDriverConn == nil { - t.Fatal("The destination database driver connection is nil.") - } - - // Generate some test data for the given ID. - var generateTestData = func(id int) string { - return fmt.Sprintf("test-%v", id) - } - - // Populate the source database with a test table containing some test data. - tx, err := srcDb.Begin() - if err != nil { - t.Fatal("Failed to begin a transaction when populating the source database:", err) - } - _, err = srcDb.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)") - if err != nil { - tx.Rollback() - t.Fatal("Failed to create the source database \"test\" table:", err) - } - for id := 0; id < testRowCount; id++ { - _, err = srcDb.Exec("INSERT INTO test (id, value) VALUES (?, ?)", id, generateTestData(id)) - if err != nil { - tx.Rollback() - t.Fatal("Failed to insert a row into the source database \"test\" table:", err) - } - } - err = tx.Commit() - if err != nil { - t.Fatal("Failed to populate the source database:", err) - } - - // Confirm that the destination database is initially empty. - var destTableCount int - err = destDb.QueryRow("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'").Scan(&destTableCount) - if err != nil { - t.Fatal("Failed to check the destination table count:", err) - } - if destTableCount != 0 { - t.Fatalf("The destination database is not empty; %v table(s) found.", destTableCount) - } - - // Prepare to perform the backup. - backup, err := destDbDriverConn.Backup("main", srcDbDriverConn, "main") - if err != nil { - t.Fatal("Failed to initialize the backup:", err) - } - - // Allow the initial page count and remaining values to be retrieved. - // According to , the page count and remaining values are "... only updated by sqlite3_backup_step()." - isDone, err := backup.Step(0) - if err != nil { - t.Fatal("Unable to perform an initial 0-page backup step:", err) - } - if isDone { - t.Fatal("Backup is unexpectedly done.") - } - - // Check that the page count and remaining values are reasonable. - initialPageCount := backup.PageCount() - if initialPageCount <= 0 { - t.Fatalf("Unexpected initial page count value: %v", initialPageCount) - } - initialRemaining := backup.Remaining() - if initialRemaining <= 0 { - t.Fatalf("Unexpected initial remaining value: %v", initialRemaining) - } - if initialRemaining != initialPageCount { - t.Fatalf("Initial remaining value differs from the initial page count value; remaining: %v; page count: %v", initialRemaining, initialPageCount) - } - - // Perform the backup. 
- if usePerPageSteps { - var startTime = time.Now().Unix() - - // Test backing-up using a page-by-page approach. - var latestRemaining = initialRemaining - for { - // Perform the backup step. - isDone, err = backup.Step(1) - if err != nil { - t.Fatal("Failed to perform a backup step:", err) - } - - // The page count should remain unchanged from its initial value. - currentPageCount := backup.PageCount() - if currentPageCount != initialPageCount { - t.Fatalf("Current page count differs from the initial page count; initial page count: %v; current page count: %v", initialPageCount, currentPageCount) - } - - // There should now be one less page remaining. - currentRemaining := backup.Remaining() - expectedRemaining := latestRemaining - 1 - if currentRemaining != expectedRemaining { - t.Fatalf("Unexpected remaining value; expected remaining value: %v; actual remaining value: %v", expectedRemaining, currentRemaining) - } - latestRemaining = currentRemaining - - if isDone { - break - } - - // Limit the runtime of the backup attempt. - if (time.Now().Unix() - startTime) > usePagePerStepsTimeoutSeconds { - t.Fatal("Backup is taking longer than expected.") - } - } - } else { - // Test the copying of all remaining pages. - isDone, err = backup.Step(-1) - if err != nil { - t.Fatal("Failed to perform a backup step:", err) - } - if !isDone { - t.Fatal("Backup is unexpectedly not done.") - } - } - - // Check that the page count and remaining values are reasonable. - finalPageCount := backup.PageCount() - if finalPageCount != initialPageCount { - t.Fatalf("Final page count differs from the initial page count; initial page count: %v; final page count: %v", initialPageCount, finalPageCount) - } - finalRemaining := backup.Remaining() - if finalRemaining != 0 { - t.Fatalf("Unexpected remaining value: %v", finalRemaining) - } - - // Finish the backup. - err = backup.Finish() - if err != nil { - t.Fatal("Failed to finish backup:", err) - } - - // Confirm that the "test" table now exists in the destination database. - var doesTestTableExist bool - err = destDb.QueryRow("SELECT EXISTS (SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = 'test' LIMIT 1) AS test_table_exists").Scan(&doesTestTableExist) - if err != nil { - t.Fatal("Failed to check if the \"test\" table exists in the destination database:", err) - } - if !doesTestTableExist { - t.Fatal("The \"test\" table could not be found in the destination database.") - } - - // Confirm that the number of rows in the destination database's "test" table matches that of the source table. - var actualTestTableRowCount int - err = destDb.QueryRow("SELECT COUNT(*) FROM test").Scan(&actualTestTableRowCount) - if err != nil { - t.Fatal("Failed to determine the rowcount of the \"test\" table in the destination database:", err) - } - if testRowCount != actualTestTableRowCount { - t.Fatalf("Unexpected destination \"test\" table row count; expected: %v; found: %v", testRowCount, actualTestTableRowCount) - } - - // Check each of the rows in the destination database. 
- for id := 0; id < testRowCount; id++ { - var checkedValue string - err = destDb.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&checkedValue) - if err != nil { - t.Fatal("Failed to query the \"test\" table in the destination database:", err) - } - - var expectedValue = generateTestData(id) - if checkedValue != expectedValue { - t.Fatalf("Unexpected value in the \"test\" table in the destination database; expected value: %v; actual value: %v", expectedValue, checkedValue) - } - } -} - -func TestBackupStepByStep(t *testing.T) { - testBackup(t, testRowCount, true) -} - -func TestBackupAllRemainingPages(t *testing.T) { - testBackup(t, testRowCount, false) -} - -// Test the error reporting when preparing to perform a backup. -func TestBackupError(t *testing.T) { - const driverName = "sqlite3_TestBackupError" - - // The driver's connection will be needed in order to perform the backup. - var dbDriverConn *SQLiteConn - sql.Register(driverName, &SQLiteDriver{ - ConnectHook: func(conn *SQLiteConn) error { - dbDriverConn = conn - return nil - }, - }) - - // Connect to the database. - dbTempFilename := TempFilename(t) - defer os.Remove(dbTempFilename) - db, err := sql.Open(driverName, dbTempFilename) - if err != nil { - t.Fatal("Failed to open the database:", err) - } - defer db.Close() - db.Ping() - - // Need the driver connection in order to perform the backup. - if dbDriverConn == nil { - t.Fatal("Failed to get the driver connection.") - } - - // Prepare to perform the backup. - // Intentionally using the same connection for both the source and destination databases, to trigger an error result. - backup, err := dbDriverConn.Backup("main", dbDriverConn, "main") - if err == nil { - t.Fatal("Failed to get the expected error result.") - } - const expectedError = "source and destination must be distinct" - if err.Error() != expectedError { - t.Fatalf("Unexpected error message; expected value: \"%v\"; actual value: \"%v\"", expectedError, err.Error()) - } - if backup != nil { - t.Fatal("Failed to get the expected nil backup result.") - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go deleted file mode 100644 index 29ece3d..0000000 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package sqlite3 - -// You can't export a Go function to C and have definitions in the C -// preamble in the same file, so we have to have callbackTrampoline in -// its own file. Because we need a separate file anyway, the support -// code for SQLite custom functions is in here. 
- -/* -#ifndef USE_LIBSQLITE3 -#include -#else -#include -#endif -#include - -void _sqlite3_result_text(sqlite3_context* ctx, const char* s); -void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l); -*/ -import "C" - -import ( - "errors" - "fmt" - "math" - "reflect" - "sync" - "unsafe" -) - -//export callbackTrampoline -func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) { - args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] - fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo) - fi.Call(ctx, args) -} - -//export stepTrampoline -func stepTrampoline(ctx *C.sqlite3_context, argc C.int, argv **C.sqlite3_value) { - args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:int(argc):int(argc)] - ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo) - ai.Step(ctx, args) -} - -//export doneTrampoline -func doneTrampoline(ctx *C.sqlite3_context) { - handle := uintptr(C.sqlite3_user_data(ctx)) - ai := lookupHandle(handle).(*aggInfo) - ai.Done(ctx) -} - -//export compareTrampoline -func compareTrampoline(handlePtr uintptr, la C.int, a *C.char, lb C.int, b *C.char) C.int { - cmp := lookupHandle(handlePtr).(func(string, string) int) - return C.int(cmp(C.GoStringN(a, la), C.GoStringN(b, lb))) -} - -//export commitHookTrampoline -func commitHookTrampoline(handle uintptr) int { - callback := lookupHandle(handle).(func() int) - return callback() -} - -//export rollbackHookTrampoline -func rollbackHookTrampoline(handle uintptr) { - callback := lookupHandle(handle).(func()) - callback() -} - -//export updateHookTrampoline -func updateHookTrampoline(handle uintptr, op int, db *C.char, table *C.char, rowid int64) { - callback := lookupHandle(handle).(func(int, string, string, int64)) - callback(op, C.GoString(db), C.GoString(table), rowid) -} - -// Use handles to avoid passing Go pointers to C. - -type handleVal struct { - db *SQLiteConn - val interface{} -} - -var handleLock sync.Mutex -var handleVals = make(map[uintptr]handleVal) -var handleIndex uintptr = 100 - -func newHandle(db *SQLiteConn, v interface{}) uintptr { - handleLock.Lock() - defer handleLock.Unlock() - i := handleIndex - handleIndex++ - handleVals[i] = handleVal{db, v} - return i -} - -func lookupHandle(handle uintptr) interface{} { - handleLock.Lock() - defer handleLock.Unlock() - r, ok := handleVals[handle] - if !ok { - if handle >= 100 && handle < handleIndex { - panic("deleted handle") - } else { - panic("invalid handle") - } - } - return r.val -} - -func deleteHandles(db *SQLiteConn) { - handleLock.Lock() - defer handleLock.Unlock() - for handle, val := range handleVals { - if val.db == db { - delete(handleVals, handle) - } - } -} - -// This is only here so that tests can refer to it. 
-type callbackArgRaw C.sqlite3_value - -type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error) - -type callbackArgCast struct { - f callbackArgConverter - typ reflect.Type -} - -func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) { - val, err := c.f(v) - if err != nil { - return reflect.Value{}, err - } - if !val.Type().ConvertibleTo(c.typ) { - return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ) - } - return val.Convert(c.typ), nil -} - -func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) { - if C.sqlite3_value_type(v) != C.SQLITE_INTEGER { - return reflect.Value{}, fmt.Errorf("argument must be an INTEGER") - } - return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil -} - -func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) { - if C.sqlite3_value_type(v) != C.SQLITE_INTEGER { - return reflect.Value{}, fmt.Errorf("argument must be an INTEGER") - } - i := int64(C.sqlite3_value_int64(v)) - val := false - if i != 0 { - val = true - } - return reflect.ValueOf(val), nil -} - -func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) { - if C.sqlite3_value_type(v) != C.SQLITE_FLOAT { - return reflect.Value{}, fmt.Errorf("argument must be a FLOAT") - } - return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil -} - -func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) { - switch C.sqlite3_value_type(v) { - case C.SQLITE_BLOB: - l := C.sqlite3_value_bytes(v) - p := C.sqlite3_value_blob(v) - return reflect.ValueOf(C.GoBytes(p, l)), nil - case C.SQLITE_TEXT: - l := C.sqlite3_value_bytes(v) - c := unsafe.Pointer(C.sqlite3_value_text(v)) - return reflect.ValueOf(C.GoBytes(c, l)), nil - default: - return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT") - } -} - -func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) { - switch C.sqlite3_value_type(v) { - case C.SQLITE_BLOB: - l := C.sqlite3_value_bytes(v) - p := (*C.char)(C.sqlite3_value_blob(v)) - return reflect.ValueOf(C.GoStringN(p, l)), nil - case C.SQLITE_TEXT: - c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v))) - return reflect.ValueOf(C.GoString(c)), nil - default: - return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT") - } -} - -func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) { - switch C.sqlite3_value_type(v) { - case C.SQLITE_INTEGER: - return callbackArgInt64(v) - case C.SQLITE_FLOAT: - return callbackArgFloat64(v) - case C.SQLITE_TEXT: - return callbackArgString(v) - case C.SQLITE_BLOB: - return callbackArgBytes(v) - case C.SQLITE_NULL: - // Interpret NULL as a nil byte slice. 
- var ret []byte - return reflect.ValueOf(ret), nil - default: - panic("unreachable") - } -} - -func callbackArg(typ reflect.Type) (callbackArgConverter, error) { - switch typ.Kind() { - case reflect.Interface: - if typ.NumMethod() != 0 { - return nil, errors.New("the only supported interface type is interface{}") - } - return callbackArgGeneric, nil - case reflect.Slice: - if typ.Elem().Kind() != reflect.Uint8 { - return nil, errors.New("the only supported slice type is []byte") - } - return callbackArgBytes, nil - case reflect.String: - return callbackArgString, nil - case reflect.Bool: - return callbackArgBool, nil - case reflect.Int64: - return callbackArgInt64, nil - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: - c := callbackArgCast{callbackArgInt64, typ} - return c.Run, nil - case reflect.Float64: - return callbackArgFloat64, nil - case reflect.Float32: - c := callbackArgCast{callbackArgFloat64, typ} - return c.Run, nil - default: - return nil, fmt.Errorf("don't know how to convert to %s", typ) - } -} - -func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) { - var args []reflect.Value - - if len(argv) < len(converters) { - return nil, fmt.Errorf("function requires at least %d arguments", len(converters)) - } - - for i, arg := range argv[:len(converters)] { - v, err := converters[i](arg) - if err != nil { - return nil, err - } - args = append(args, v) - } - - if variadic != nil { - for _, arg := range argv[len(converters):] { - v, err := variadic(arg) - if err != nil { - return nil, err - } - args = append(args, v) - } - } - return args, nil -} - -type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error - -func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error { - switch v.Type().Kind() { - case reflect.Int64: - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: - v = v.Convert(reflect.TypeOf(int64(0))) - case reflect.Bool: - b := v.Interface().(bool) - if b { - v = reflect.ValueOf(int64(1)) - } else { - v = reflect.ValueOf(int64(0)) - } - default: - return fmt.Errorf("cannot convert %s to INTEGER", v.Type()) - } - - C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64))) - return nil -} - -func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error { - switch v.Type().Kind() { - case reflect.Float64: - case reflect.Float32: - v = v.Convert(reflect.TypeOf(float64(0))) - default: - return fmt.Errorf("cannot convert %s to FLOAT", v.Type()) - } - - C.sqlite3_result_double(ctx, C.double(v.Interface().(float64))) - return nil -} - -func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error { - if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 { - return fmt.Errorf("cannot convert %s to BLOB", v.Type()) - } - i := v.Interface() - if i == nil || len(i.([]byte)) == 0 { - C.sqlite3_result_null(ctx) - } else { - bs := i.([]byte) - C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs))) - } - return nil -} - -func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error { - if v.Type().Kind() != reflect.String { - return fmt.Errorf("cannot convert %s to TEXT", v.Type()) - } - C._sqlite3_result_text(ctx, C.CString(v.Interface().(string))) - return nil -} - -func callbackRet(typ reflect.Type) (callbackRetConverter, error) { - 
switch typ.Kind() { - case reflect.Slice: - if typ.Elem().Kind() != reflect.Uint8 { - return nil, errors.New("the only supported slice type is []byte") - } - return callbackRetBlob, nil - case reflect.String: - return callbackRetText, nil - case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: - return callbackRetInteger, nil - case reflect.Float32, reflect.Float64: - return callbackRetFloat, nil - default: - return nil, fmt.Errorf("don't know how to convert to %s", typ) - } -} - -func callbackError(ctx *C.sqlite3_context, err error) { - cstr := C.CString(err.Error()) - defer C.free(unsafe.Pointer(cstr)) - C.sqlite3_result_error(ctx, cstr, -1) -} - -// Test support code. Tests are not allowed to import "C", so we can't -// declare any functions that use C.sqlite3_value. -func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter { - return func(*C.sqlite3_value) (reflect.Value, error) { - return v, err - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/callback_test.go b/vendor/github.com/mattn/go-sqlite3/callback_test.go deleted file mode 100644 index 5c61f44..0000000 --- a/vendor/github.com/mattn/go-sqlite3/callback_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package sqlite3 - -import ( - "errors" - "math" - "reflect" - "testing" -) - -func TestCallbackArgCast(t *testing.T) { - intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil) - floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil) - errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test")) - - tests := []struct { - f callbackArgConverter - o reflect.Value - }{ - {intConv, reflect.ValueOf(int8(-1))}, - {intConv, reflect.ValueOf(int16(-1))}, - {intConv, reflect.ValueOf(int32(-1))}, - {intConv, reflect.ValueOf(uint8(math.MaxUint8))}, - {intConv, reflect.ValueOf(uint16(math.MaxUint16))}, - {intConv, reflect.ValueOf(uint32(math.MaxUint32))}, - // Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1 - {intConv, reflect.ValueOf(uint64(math.MaxInt64))}, - {floatConv, reflect.ValueOf(float32(math.Inf(1)))}, - } - - for _, test := range tests { - conv := callbackArgCast{test.f, test.o.Type()} - val, err := conv.Run(nil) - if err != nil { - t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err) - } else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) { - t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface()) - } - } - - conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))} - _, err := conv.Run(nil) - if err == nil { - t.Errorf("Expected error during callbackArgCast, but got none") - } -} - -func TestCallbackConverters(t *testing.T) { - tests := []struct { - v interface{} - err bool - }{ - // Unfortunately, we can't tell which converter was returned, - // but we can at least check which types can be converted. 
- {[]byte{0}, false}, - {"text", false}, - {true, false}, - {int8(0), false}, - {int16(0), false}, - {int32(0), false}, - {int64(0), false}, - {uint8(0), false}, - {uint16(0), false}, - {uint32(0), false}, - {uint64(0), false}, - {int(0), false}, - {uint(0), false}, - {float64(0), false}, - {float32(0), false}, - - {func() {}, true}, - {complex64(complex(0, 0)), true}, - {complex128(complex(0, 0)), true}, - {struct{}{}, true}, - {map[string]string{}, true}, - {[]string{}, true}, - {(*int8)(nil), true}, - {make(chan int), true}, - } - - for _, test := range tests { - _, err := callbackArg(reflect.TypeOf(test.v)) - if test.err && err == nil { - t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v)) - } else if !test.err && err != nil { - t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err) - } - } - - for _, test := range tests { - _, err := callbackRet(reflect.TypeOf(test.v)) - if test.err && err == nil { - t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v)) - } else if !test.err && err != nil { - t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err) - } - } -} diff --git a/vendor/github.com/mattn/go-sqlite3/doc.go b/vendor/github.com/mattn/go-sqlite3/doc.go deleted file mode 100644 index c721f77..0000000 --- a/vendor/github.com/mattn/go-sqlite3/doc.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package sqlite3 provides interface to SQLite3 databases. - -This works as a driver for database/sql. - -Installation - - go get github.com/mattn/go-sqlite3 - -Supported Types - -Currently, go-sqlite3 supports the following data types. - - +------------------------------+ - |go | sqlite3 | - |----------|-------------------| - |nil | null | - |int | integer | - |int64 | integer | - |float64 | float | - |bool | integer | - |[]byte | blob | - |string | text | - |time.Time | timestamp/datetime| - +------------------------------+ - -SQLite3 Extension - -You can write your own extension module for sqlite3. For example, below is an -extension for a Regexp matcher operation. - - #include - #include - #include - #include - - SQLITE_EXTENSION_INIT1 - static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { - if (argc >= 2) { - const char *target = (const char *)sqlite3_value_text(argv[1]); - const char *pattern = (const char *)sqlite3_value_text(argv[0]); - const char* errstr = NULL; - int erroff = 0; - int vec[500]; - int n, rc; - pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); - rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); - if (rc <= 0) { - sqlite3_result_error(context, errstr, 0); - return; - } - sqlite3_result_int(context, 1); - } - } - - #ifdef _WIN32 - __declspec(dllexport) - #endif - int sqlite3_extension_init(sqlite3 *db, char **errmsg, - const sqlite3_api_routines *api) { - SQLITE_EXTENSION_INIT2(api); - return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, - (void*)db, regexp_func, NULL, NULL); - } - -It needs to be built as a so/dll shared library. And you need to register -the extension module like below. - - sql.Register("sqlite3_with_extensions", - &sqlite3.SQLiteDriver{ - Extensions: []string{ - "sqlite3_mod_regexp", - }, - }) - -Then, you can use this extension. - - rows, err := db.Query("select text from mytable where name regexp '^golang'") - -Connection Hook - -You can hook and inject your code when the connection is established. 
database/sql -doesn't provide a way to get native go-sqlite3 interfaces. So if you want, -you need to set ConnectHook and get the SQLiteConn. - - sql.Register("sqlite3_with_hook_example", - &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - sqlite3conn = append(sqlite3conn, conn) - return nil - }, - }) - -Go SQlite3 Extensions - -If you want to register Go functions as SQLite extension functions, -call RegisterFunction from ConnectHook. - - regex = func(re, s string) (bool, error) { - return regexp.MatchString(re, s) - } - sql.Register("sqlite3_with_go_func", - &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - return conn.RegisterFunc("regexp", regex, true) - }, - }) - -See the documentation of RegisterFunc for more details. - -*/ -package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/error.go b/vendor/github.com/mattn/go-sqlite3/error.go deleted file mode 100644 index 49ab890..0000000 --- a/vendor/github.com/mattn/go-sqlite3/error.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package sqlite3 - -import "C" - -// ErrNo inherit errno. -type ErrNo int - -// ErrNoMask is mask code. -const ErrNoMask C.int = 0xff - -// ErrNoExtended is extended errno. -type ErrNoExtended int - -// Error implement sqlite error code. -type Error struct { - Code ErrNo /* The error code returned by SQLite */ - ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */ - err string /* The error string returned by sqlite3_errmsg(), - this usually contains more specific details. */ -} - -// result codes from http://www.sqlite.org/c3ref/c_abort.html -var ( - ErrError = ErrNo(1) /* SQL error or missing database */ - ErrInternal = ErrNo(2) /* Internal logic error in SQLite */ - ErrPerm = ErrNo(3) /* Access permission denied */ - ErrAbort = ErrNo(4) /* Callback routine requested an abort */ - ErrBusy = ErrNo(5) /* The database file is locked */ - ErrLocked = ErrNo(6) /* A table in the database is locked */ - ErrNomem = ErrNo(7) /* A malloc() failed */ - ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */ - ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */ - ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */ - ErrCorrupt = ErrNo(11) /* The database disk image is malformed */ - ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */ - ErrFull = ErrNo(13) /* Insertion failed because database is full */ - ErrCantOpen = ErrNo(14) /* Unable to open the database file */ - ErrProtocol = ErrNo(15) /* Database lock protocol error */ - ErrEmpty = ErrNo(16) /* Database is empty */ - ErrSchema = ErrNo(17) /* The database schema changed */ - ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */ - ErrConstraint = ErrNo(19) /* Abort due to constraint violation */ - ErrMismatch = ErrNo(20) /* Data type mismatch */ - ErrMisuse = ErrNo(21) /* Library used incorrectly */ - ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */ - ErrAuth = ErrNo(23) /* Authorization denied */ - ErrFormat = ErrNo(24) /* Auxiliary database format error */ - ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */ - ErrNotADB = ErrNo(26) /* File opened that is not a database file */ - ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */ - ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */ -) - -// Error return error message 
from errno. -func (err ErrNo) Error() string { - return Error{Code: err}.Error() -} - -// Extend return extended errno. -func (err ErrNo) Extend(by int) ErrNoExtended { - return ErrNoExtended(int(err) | (by << 8)) -} - -// Error return error message that is extended code. -func (err ErrNoExtended) Error() string { - return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error() -} - -func (err Error) Error() string { - if err.err != "" { - return err.err - } - return errorString(err) -} - -// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html -var ( - ErrIoErrRead = ErrIoErr.Extend(1) - ErrIoErrShortRead = ErrIoErr.Extend(2) - ErrIoErrWrite = ErrIoErr.Extend(3) - ErrIoErrFsync = ErrIoErr.Extend(4) - ErrIoErrDirFsync = ErrIoErr.Extend(5) - ErrIoErrTruncate = ErrIoErr.Extend(6) - ErrIoErrFstat = ErrIoErr.Extend(7) - ErrIoErrUnlock = ErrIoErr.Extend(8) - ErrIoErrRDlock = ErrIoErr.Extend(9) - ErrIoErrDelete = ErrIoErr.Extend(10) - ErrIoErrBlocked = ErrIoErr.Extend(11) - ErrIoErrNoMem = ErrIoErr.Extend(12) - ErrIoErrAccess = ErrIoErr.Extend(13) - ErrIoErrCheckReservedLock = ErrIoErr.Extend(14) - ErrIoErrLock = ErrIoErr.Extend(15) - ErrIoErrClose = ErrIoErr.Extend(16) - ErrIoErrDirClose = ErrIoErr.Extend(17) - ErrIoErrSHMOpen = ErrIoErr.Extend(18) - ErrIoErrSHMSize = ErrIoErr.Extend(19) - ErrIoErrSHMLock = ErrIoErr.Extend(20) - ErrIoErrSHMMap = ErrIoErr.Extend(21) - ErrIoErrSeek = ErrIoErr.Extend(22) - ErrIoErrDeleteNoent = ErrIoErr.Extend(23) - ErrIoErrMMap = ErrIoErr.Extend(24) - ErrIoErrGetTempPath = ErrIoErr.Extend(25) - ErrIoErrConvPath = ErrIoErr.Extend(26) - ErrLockedSharedCache = ErrLocked.Extend(1) - ErrBusyRecovery = ErrBusy.Extend(1) - ErrBusySnapshot = ErrBusy.Extend(2) - ErrCantOpenNoTempDir = ErrCantOpen.Extend(1) - ErrCantOpenIsDir = ErrCantOpen.Extend(2) - ErrCantOpenFullPath = ErrCantOpen.Extend(3) - ErrCantOpenConvPath = ErrCantOpen.Extend(4) - ErrCorruptVTab = ErrCorrupt.Extend(1) - ErrReadonlyRecovery = ErrReadonly.Extend(1) - ErrReadonlyCantLock = ErrReadonly.Extend(2) - ErrReadonlyRollback = ErrReadonly.Extend(3) - ErrReadonlyDbMoved = ErrReadonly.Extend(4) - ErrAbortRollback = ErrAbort.Extend(2) - ErrConstraintCheck = ErrConstraint.Extend(1) - ErrConstraintCommitHook = ErrConstraint.Extend(2) - ErrConstraintForeignKey = ErrConstraint.Extend(3) - ErrConstraintFunction = ErrConstraint.Extend(4) - ErrConstraintNotNull = ErrConstraint.Extend(5) - ErrConstraintPrimaryKey = ErrConstraint.Extend(6) - ErrConstraintTrigger = ErrConstraint.Extend(7) - ErrConstraintUnique = ErrConstraint.Extend(8) - ErrConstraintVTab = ErrConstraint.Extend(9) - ErrConstraintRowID = ErrConstraint.Extend(10) - ErrNoticeRecoverWAL = ErrNotice.Extend(1) - ErrNoticeRecoverRollback = ErrNotice.Extend(2) - ErrWarningAutoIndex = ErrWarning.Extend(1) -) diff --git a/vendor/github.com/mattn/go-sqlite3/error_test.go b/vendor/github.com/mattn/go-sqlite3/error_test.go deleted file mode 100644 index 1ccbe5b..0000000 --- a/vendor/github.com/mattn/go-sqlite3/error_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package sqlite3 - -import ( - "database/sql" - "io/ioutil" - "os" - "path" - "testing" -) - -func TestSimpleError(t *testing.T) { - e := ErrError.Error() - if e != "SQL logic error or missing database" { - t.Error("wrong error code:" + e) - } -} - -func TestCorruptDbErrors(t *testing.T) { - dirName, err := ioutil.TempDir("", "sqlite3") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dirName) - - dbFileName := path.Join(dirName, "test.db") - f, err := os.Create(dbFileName) - if err != nil { - t.Error(err) - } - f.Write([]byte{1, 2, 3, 4, 5}) - f.Close() - - db, err := sql.Open("sqlite3", dbFileName) - if err == nil { - _, err = db.Exec("drop table foo") - } - - sqliteErr := err.(Error) - if sqliteErr.Code != ErrNotADB { - t.Error("wrong error code for corrupted DB") - } - if err.Error() == "" { - t.Error("wrong error string for corrupted DB") - } - db.Close() -} - -func TestSqlLogicErrors(t *testing.T) { - dirName, err := ioutil.TempDir("", "sqlite3") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dirName) - - dbFileName := path.Join(dirName, "test.db") - db, err := sql.Open("sqlite3", dbFileName) - if err != nil { - t.Error(err) - } - defer db.Close() - - _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") - if err != nil { - t.Error(err) - } - - const expectedErr = "table Foo already exists" - _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") - if err.Error() != expectedErr { - t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr) - } - -} - -func TestExtendedErrorCodes_ForeignKey(t *testing.T) { - dirName, err := ioutil.TempDir("", "sqlite3-err") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dirName) - - dbFileName := path.Join(dirName, "test.db") - db, err := sql.Open("sqlite3", dbFileName) - if err != nil { - t.Error(err) - } - defer db.Close() - - _, err = db.Exec("PRAGMA foreign_keys=ON;") - if err != nil { - t.Errorf("PRAGMA foreign_keys=ON: %v", err) - } - - _, err = db.Exec(`CREATE TABLE Foo ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - value INTEGER NOT NULL, - ref INTEGER NULL REFERENCES Foo (id), - UNIQUE(value) - );`) - if err != nil { - t.Error(err) - } - - _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);") - if err == nil { - t.Error("No error!") - } else { - sqliteErr := err.(Error) - if sqliteErr.Code != ErrConstraint { - t.Errorf("Wrong basic error code: %d != %d", - sqliteErr.Code, ErrConstraint) - } - if sqliteErr.ExtendedCode != ErrConstraintForeignKey { - t.Errorf("Wrong extended error code: %d != %d", - sqliteErr.ExtendedCode, ErrConstraintForeignKey) - } - } - -} - -func TestExtendedErrorCodes_NotNull(t *testing.T) { - dirName, err := ioutil.TempDir("", "sqlite3-err") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dirName) - - dbFileName := path.Join(dirName, "test.db") - db, err := sql.Open("sqlite3", dbFileName) - if err != nil { - t.Error(err) - } - defer db.Close() - - _, err = db.Exec("PRAGMA foreign_keys=ON;") - if err != nil { - t.Errorf("PRAGMA foreign_keys=ON: %v", err) - } - - _, err = db.Exec(`CREATE TABLE Foo ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - value INTEGER NOT NULL, - ref INTEGER NULL REFERENCES Foo (id), - UNIQUE(value) - );`) - if err != nil { - t.Error(err) - } - - res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") - if err != nil { - t.Fatalf("Creating first row: %v", err) - } - - id, err := res.LastInsertId() - if err != nil { - t.Fatalf("Retrieving last insert id: %v", err) - } - - _, err = db.Exec("INSERT INTO Foo (ref) VALUES 
(?);", id) - if err == nil { - t.Error("No error!") - } else { - sqliteErr := err.(Error) - if sqliteErr.Code != ErrConstraint { - t.Errorf("Wrong basic error code: %d != %d", - sqliteErr.Code, ErrConstraint) - } - if sqliteErr.ExtendedCode != ErrConstraintNotNull { - t.Errorf("Wrong extended error code: %d != %d", - sqliteErr.ExtendedCode, ErrConstraintNotNull) - } - } - -} - -func TestExtendedErrorCodes_Unique(t *testing.T) { - dirName, err := ioutil.TempDir("", "sqlite3-err") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dirName) - - dbFileName := path.Join(dirName, "test.db") - db, err := sql.Open("sqlite3", dbFileName) - if err != nil { - t.Error(err) - } - defer db.Close() - - _, err = db.Exec("PRAGMA foreign_keys=ON;") - if err != nil { - t.Errorf("PRAGMA foreign_keys=ON: %v", err) - } - - _, err = db.Exec(`CREATE TABLE Foo ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - value INTEGER NOT NULL, - ref INTEGER NULL REFERENCES Foo (id), - UNIQUE(value) - );`) - if err != nil { - t.Error(err) - } - - res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") - if err != nil { - t.Fatalf("Creating first row: %v", err) - } - - id, err := res.LastInsertId() - if err != nil { - t.Fatalf("Retrieving last insert id: %v", err) - } - - _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id) - if err == nil { - t.Error("No error!") - } else { - sqliteErr := err.(Error) - if sqliteErr.Code != ErrConstraint { - t.Errorf("Wrong basic error code: %d != %d", - sqliteErr.Code, ErrConstraint) - } - if sqliteErr.ExtendedCode != ErrConstraintUnique { - t.Errorf("Wrong extended error code: %d != %d", - sqliteErr.ExtendedCode, ErrConstraintUnique) - } - extended := sqliteErr.Code.Extend(3).Error() - expected := "constraint failed" - if extended != expected { - t.Errorf("Wrong basic error code: %q != %q", - extended, expected) - } - } - -} diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c deleted file mode 100644 index 825e7d8..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ /dev/null @@ -1,201408 +0,0 @@ -#ifndef USE_LIBSQLITE3 -#define SQLITE_DISABLE_INTRINSIC 1 -/****************************************************************************** -** This file is an amalgamation of many separate C source files from SQLite -** version 3.17.0. By combining all the individual C code files into this -** single large file, the entire code can be compiled as a single translation -** unit. This allows many compilers to do optimizations that would not be -** possible if the files were compiled separately. Performance improvements -** of 5% or more are commonly seen when SQLite is compiled as a single -** translation unit. -** -** This file is all you need to compile SQLite. To use SQLite in other -** programs, you need this file and the "sqlite3.h" header file that defines -** the programming interface to the SQLite library. (If you do not have -** the "sqlite3.h" header file at hand, you will find a copy embedded within -** the text of this file. Search for "Begin file sqlite3.h" to find the start -** of the embedded sqlite3.h header file.) Additional code files may be needed -** if you want a wrapper to interface SQLite with your choice of programming -** language. The code for the "sqlite3" command-line shell is also in a -** separate file. This file contains only code for the core SQLite library. 
-*/ -#define SQLITE_CORE 1 -#define SQLITE_AMALGAMATION 1 -#ifndef SQLITE_PRIVATE -# define SQLITE_PRIVATE static -#endif -/************** Begin file sqliteInt.h ***************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Internal interface definitions for SQLite. -** -*/ -#ifndef SQLITEINT_H -#define SQLITEINT_H - -/* Special Comments: -** -** Some comments have special meaning to the tools that measure test -** coverage: -** -** NO_TEST - The branches on this line are not -** measured by branch coverage. This is -** used on lines of code that actually -** implement parts of coverage testing. -** -** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false -** and the correct answer is still obtained, -** though perhaps more slowly. -** -** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true -** and the correct answer is still obtained, -** though perhaps more slowly. -** -** PREVENTS-HARMLESS-OVERREAD - This branch prevents a buffer overread -** that would be harmless and undetectable -** if it did occur. -** -** In all cases, the special comment must be enclosed in the usual -** slash-asterisk...asterisk-slash comment marks, with no spaces between the -** asterisks and the comment text. -*/ - -/* -** Make sure the Tcl calling convention macro is defined. This macro is -** only used by test code and Tcl integration code. -*/ -#ifndef SQLITE_TCLAPI -# define SQLITE_TCLAPI -#endif - -/* -** Make sure that rand_s() is available on Windows systems with MSVC 2005 -** or higher. -*/ -#if defined(_MSC_VER) && _MSC_VER>=1400 -# define _CRT_RAND_S -#endif - -/* -** Include the header file used to customize the compiler options for MSVC. -** This should be done first so that it can successfully prevent spurious -** compiler warnings due to subsequent content in this file and other files -** that are included by this file. -*/ -/************** Include msvc.h in the middle of sqliteInt.h ******************/ -/************** Begin file msvc.h ********************************************/ -/* -** 2015 January 12 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains code that is specific to MSVC. 
-*/ -#ifndef SQLITE_MSVC_H -#define SQLITE_MSVC_H - -#if defined(_MSC_VER) -#pragma warning(disable : 4054) -#pragma warning(disable : 4055) -#pragma warning(disable : 4100) -#pragma warning(disable : 4127) -#pragma warning(disable : 4130) -#pragma warning(disable : 4152) -#pragma warning(disable : 4189) -#pragma warning(disable : 4206) -#pragma warning(disable : 4210) -#pragma warning(disable : 4232) -#pragma warning(disable : 4244) -#pragma warning(disable : 4305) -#pragma warning(disable : 4306) -#pragma warning(disable : 4702) -#pragma warning(disable : 4706) -#endif /* defined(_MSC_VER) */ - -#endif /* SQLITE_MSVC_H */ - -/************** End of msvc.h ************************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - -/* -** Special setup for VxWorks -*/ -/************** Include vxworks.h in the middle of sqliteInt.h ***************/ -/************** Begin file vxworks.h *****************************************/ -/* -** 2015-03-02 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains code that is specific to Wind River's VxWorks -*/ -#if defined(__RTP__) || defined(_WRS_KERNEL) -/* This is VxWorks. Set up things specially for that OS -*/ -#include -#include /* amalgamator: dontcache */ -#define OS_VXWORKS 1 -#define SQLITE_OS_OTHER 0 -#define SQLITE_HOMEGROWN_RECURSIVE_MUTEX 1 -#define SQLITE_OMIT_LOAD_EXTENSION 1 -#define SQLITE_ENABLE_LOCKING_STYLE 0 -#define HAVE_UTIME 1 -#else -/* This is not VxWorks. */ -#define OS_VXWORKS 0 -#define HAVE_FCHOWN 1 -#define HAVE_READLINK 1 -#define HAVE_LSTAT 1 -#endif /* defined(_WRS_KERNEL) */ - -/************** End of vxworks.h *********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - -/* -** These #defines should enable >2GB file support on POSIX if the -** underlying operating system supports it. If the OS lacks -** large file support, or if the OS is windows, these should be no-ops. -** -** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any -** system #includes. Hence, this block of code must be the very first -** code in all source files. -** -** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch -** on the compiler command line. This is necessary if you are compiling -** on a recent machine (ex: Red Hat 7.2) but you want your code to work -** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2 -** without this option, LFS is enable. But LFS does not exist in the kernel -** in Red Hat 6.0, so the code won't work. Hence, for maximum binary -** portability you should omit LFS. -** -** The previous paragraph was written in 2005. (This paragraph is written -** on 2008-11-28.) These days, all Linux kernels support large files, so -** you should probably leave LFS enabled. But some embedded platforms might -** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful. -** -** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later. 
-*/ -#ifndef SQLITE_DISABLE_LFS -# define _LARGE_FILE 1 -# ifndef _FILE_OFFSET_BITS -# define _FILE_OFFSET_BITS 64 -# endif -# define _LARGEFILE_SOURCE 1 -#endif - -/* The GCC_VERSION, CLANG_VERSION, and MSVC_VERSION macros are used to -** conditionally include optimizations for each of these compilers. A -** value of 0 means that compiler is not being used. The -** SQLITE_DISABLE_INTRINSIC macro means do not use any compiler-specific -** optimizations, and hence set all compiler macros to 0 -*/ -#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC) -# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__) -#else -# define GCC_VERSION 0 -#endif -#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC) -# define CLANG_VERSION \ - (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__) -#else -# define CLANG_VERSION 0 -#endif -#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC) -# define MSVC_VERSION _MSC_VER -#else -# define MSVC_VERSION 0 -#endif - -/* Needed for various definitions... */ -#if defined(__GNUC__) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE -#endif - -#if defined(__OpenBSD__) && !defined(_BSD_SOURCE) -# define _BSD_SOURCE -#endif - -/* -** For MinGW, check to see if we can include the header file containing its -** version information, among other things. Normally, this internal MinGW -** header file would [only] be included automatically by other MinGW header -** files; however, the contained version information is now required by this -** header file to work around binary compatibility issues (see below) and -** this is the only known way to reliably obtain it. This entire #if block -** would be completely unnecessary if there was any other way of detecting -** MinGW via their preprocessor (e.g. if they customized their GCC to define -** some MinGW-specific macros). When compiling for MinGW, either the -** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be -** defined; otherwise, detection of conditions specific to MinGW will be -** disabled. -*/ -#if defined(_HAVE_MINGW_H) -# include "mingw.h" -#elif defined(_HAVE__MINGW_H) -# include "_mingw.h" -#endif - -/* -** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T -** define is required to maintain binary compatibility with the MSVC runtime -** library in use (e.g. for Windows XP). -*/ -#if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \ - defined(_WIN32) && !defined(_WIN64) && \ - defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \ - defined(__MSVCRT__) -# define _USE_32BIT_TIME_T -#endif - -/* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear -** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for -** MinGW. -*/ -/************** Include sqlite3.h in the middle of sqliteInt.h ***************/ -/************** Begin file sqlite3.h *****************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the SQLite library -** presents to client programs. 
If a C-function, structure, datatype, -** or constant definition does not appear in this file, then it is -** not a published API of SQLite, is subject to change without -** notice, and should not be referenced by programs that use SQLite. -** -** Some of the definitions that are in this file are marked as -** "experimental". Experimental interfaces are normally new -** features recently added to SQLite. We do not anticipate changes -** to experimental interfaces but reserve the right to make minor changes -** if experience from use "in the wild" suggest such changes are prudent. -** -** The official C-language API documentation for SQLite is derived -** from comments in this file. This file is the authoritative source -** on how SQLite interfaces are supposed to operate. -** -** The name of this file under configuration management is "sqlite.h.in". -** The makefile makes some minor changes to this file (such as inserting -** the version number) and changes its name to "sqlite3.h" as -** part of the build process. -*/ -#ifndef SQLITE3_H -#define SQLITE3_H -#include /* Needed for the definition of va_list */ - -/* -** Make sure we can call this stuff from C++. -*/ -#if 0 -extern "C" { -#endif - - -/* -** Provide the ability to override linkage features of the interface. -*/ -#ifndef SQLITE_EXTERN -# define SQLITE_EXTERN extern -#endif -#ifndef SQLITE_API -# define SQLITE_API -#endif -#ifndef SQLITE_CDECL -# define SQLITE_CDECL -#endif -#ifndef SQLITE_APICALL -# define SQLITE_APICALL -#endif -#ifndef SQLITE_STDCALL -# define SQLITE_STDCALL SQLITE_APICALL -#endif -#ifndef SQLITE_CALLBACK -# define SQLITE_CALLBACK -#endif -#ifndef SQLITE_SYSAPI -# define SQLITE_SYSAPI -#endif - -/* -** These no-op macros are used in front of interfaces to mark those -** interfaces as either deprecated or experimental. New applications -** should not use deprecated interfaces - they are supported for backwards -** compatibility only. Application writers should be aware that -** experimental interfaces are subject to change in point releases. -** -** These macros used to resolve to various kinds of compiler magic that -** would generate warning messages when they were used. But that -** compiler magic ended up generating such a flurry of bug reports -** that we have taken it all out and gone back to using simple -** noop macros. -*/ -#define SQLITE_DEPRECATED -#define SQLITE_EXPERIMENTAL - -/* -** Ensure these symbols were not defined by some previous header file. -*/ -#ifdef SQLITE_VERSION -# undef SQLITE_VERSION -#endif -#ifdef SQLITE_VERSION_NUMBER -# undef SQLITE_VERSION_NUMBER -#endif - -/* -** CAPI3REF: Compile-Time Library Version Numbers -** -** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header -** evaluates to a string literal that is the SQLite version in the -** format "X.Y.Z" where X is the major version number (always 3 for -** SQLite3) and Y is the minor version number and Z is the release number.)^ -** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer -** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same -** numbers used in [SQLITE_VERSION].)^ -** The SQLITE_VERSION_NUMBER for any given release of SQLite will also -** be larger than the release from which it is derived. Either Y will -** be held constant and Z will be incremented or else Y will be incremented -** and Z will be reset to zero. -** -** Since [version 3.6.18] ([dateof:3.6.18]), -** SQLite source code has been stored in the -** Fossil configuration management -** system. 
^The SQLITE_SOURCE_ID macro evaluates to -** a string which identifies a particular check-in of SQLite -** within its configuration management system. ^The SQLITE_SOURCE_ID -** string contains the date and time of the check-in (UTC) and an SHA1 -** hash of the entire source tree. -** -** See also: [sqlite3_libversion()], -** [sqlite3_libversion_number()], [sqlite3_sourceid()], -** [sqlite_version()] and [sqlite_source_id()]. -*/ -#define SQLITE_VERSION "3.17.0" -#define SQLITE_VERSION_NUMBER 3017000 -#define SQLITE_SOURCE_ID "2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c" - -/* -** CAPI3REF: Run-Time Library Version Numbers -** KEYWORDS: sqlite3_version sqlite3_sourceid -** -** These interfaces provide the same information as the [SQLITE_VERSION], -** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros -** but are associated with the library instead of the header file. ^(Cautious -** programmers might include assert() statements in their application to -** verify that values returned by these interfaces match the macros in -** the header, and thus ensure that the application is -** compiled with matching library and header files. -** -**
-** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
-** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
-** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
-** 
)^ -** -** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] -** macro. ^The sqlite3_libversion() function returns a pointer to the -** to the sqlite3_version[] string constant. The sqlite3_libversion() -** function is provided for use in DLLs since DLL users usually do not have -** direct access to string constants within the DLL. ^The -** sqlite3_libversion_number() function returns an integer equal to -** [SQLITE_VERSION_NUMBER]. ^The sqlite3_sourceid() function returns -** a pointer to a string constant whose value is the same as the -** [SQLITE_SOURCE_ID] C preprocessor macro. -** -** See also: [sqlite_version()] and [sqlite_source_id()]. -*/ -SQLITE_API const char sqlite3_version[] = SQLITE_VERSION; -SQLITE_API const char *sqlite3_libversion(void); -SQLITE_API const char *sqlite3_sourceid(void); -SQLITE_API int sqlite3_libversion_number(void); - -/* -** CAPI3REF: Run-Time Library Compilation Options Diagnostics -** -** ^The sqlite3_compileoption_used() function returns 0 or 1 -** indicating whether the specified option was defined at -** compile time. ^The SQLITE_ prefix may be omitted from the -** option name passed to sqlite3_compileoption_used(). -** -** ^The sqlite3_compileoption_get() function allows iterating -** over the list of options that were defined at compile time by -** returning the N-th compile time option string. ^If N is out of range, -** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_ -** prefix is omitted from any strings returned by -** sqlite3_compileoption_get(). -** -** ^Support for the diagnostic functions sqlite3_compileoption_used() -** and sqlite3_compileoption_get() may be omitted by specifying the -** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time. -** -** See also: SQL functions [sqlite_compileoption_used()] and -** [sqlite_compileoption_get()] and the [compile_options pragma]. -*/ -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS -SQLITE_API int sqlite3_compileoption_used(const char *zOptName); -SQLITE_API const char *sqlite3_compileoption_get(int N); -#endif - -/* -** CAPI3REF: Test To See If The Library Is Threadsafe -** -** ^The sqlite3_threadsafe() function returns zero if and only if -** SQLite was compiled with mutexing code omitted due to the -** [SQLITE_THREADSAFE] compile-time option being set to 0. -** -** SQLite can be compiled with or without mutexes. When -** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes -** are enabled and SQLite is threadsafe. When the -** [SQLITE_THREADSAFE] macro is 0, -** the mutexes are omitted. Without the mutexes, it is not safe -** to use SQLite concurrently from more than one thread. -** -** Enabling mutexes incurs a measurable performance penalty. -** So if speed is of utmost importance, it makes sense to disable -** the mutexes. But for maximum safety, mutexes should be enabled. -** ^The default behavior is for mutexes to be enabled. -** -** This interface can be used by an application to make sure that the -** version of SQLite that it is linking against was compiled with -** the desired setting of the [SQLITE_THREADSAFE] macro. -** -** This interface only reports on the compile-time mutex setting -** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with -** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but -** can be fully or partially disabled using a call to [sqlite3_config()] -** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], -** or [SQLITE_CONFIG_SERIALIZED]. 
^(The return value of the -** sqlite3_threadsafe() function shows only the compile-time setting of -** thread safety, not any run-time changes to that setting made by -** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() -** is unchanged by calls to sqlite3_config().)^ -** -** See the [threading mode] documentation for additional information. -*/ -SQLITE_API int sqlite3_threadsafe(void); - -/* -** CAPI3REF: Database Connection Handle -** KEYWORDS: {database connection} {database connections} -** -** Each open SQLite database is represented by a pointer to an instance of -** the opaque structure named "sqlite3". It is useful to think of an sqlite3 -** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and -** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] -** and [sqlite3_close_v2()] are its destructors. There are many other -** interfaces (such as -** [sqlite3_prepare_v2()], [sqlite3_create_function()], and -** [sqlite3_busy_timeout()] to name but three) that are methods on an -** sqlite3 object. -*/ -typedef struct sqlite3 sqlite3; - -/* -** CAPI3REF: 64-Bit Integer Types -** KEYWORDS: sqlite_int64 sqlite_uint64 -** -** Because there is no cross-platform way to specify 64-bit integer types -** SQLite includes typedefs for 64-bit signed and unsigned integers. -** -** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. -** The sqlite_int64 and sqlite_uint64 types are supported for backwards -** compatibility only. -** -** ^The sqlite3_int64 and sqlite_int64 types can store integer values -** between -9223372036854775808 and +9223372036854775807 inclusive. ^The -** sqlite3_uint64 and sqlite_uint64 types can store integer values -** between 0 and +18446744073709551615 inclusive. -*/ -#ifdef SQLITE_INT64_TYPE - typedef SQLITE_INT64_TYPE sqlite_int64; -# ifdef SQLITE_UINT64_TYPE - typedef SQLITE_UINT64_TYPE sqlite_uint64; -# else - typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; -# endif -#elif defined(_MSC_VER) || defined(__BORLANDC__) - typedef __int64 sqlite_int64; - typedef unsigned __int64 sqlite_uint64; -#else - typedef long long int sqlite_int64; - typedef unsigned long long int sqlite_uint64; -#endif -typedef sqlite_int64 sqlite3_int64; -typedef sqlite_uint64 sqlite3_uint64; - -/* -** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite3_int64 -#endif - -/* -** CAPI3REF: Closing A Database Connection -** DESTRUCTOR: sqlite3 -** -** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors -** for the [sqlite3] object. -** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if -** the [sqlite3] object is successfully destroyed and all associated -** resources are deallocated. -** -** ^If the database connection is associated with unfinalized prepared -** statements or unfinished sqlite3_backup objects then sqlite3_close() -** will leave the database connection open and return [SQLITE_BUSY]. -** ^If sqlite3_close_v2() is called with unfinalized prepared statements -** and/or unfinished sqlite3_backups, then the database connection becomes -** an unusable "zombie" which will automatically be deallocated when the -** last prepared statement is finalized or the last sqlite3_backup is -** finished. 
The sqlite3_close_v2() interface is intended for use with -** host languages that are garbage collected, and where the order in which -** destructors are called is arbitrary. -** -** Applications should [sqlite3_finalize | finalize] all [prepared statements], -** [sqlite3_blob_close | close] all [BLOB handles], and -** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated -** with the [sqlite3] object prior to attempting to close the object. ^If -** sqlite3_close_v2() is called on a [database connection] that still has -** outstanding [prepared statements], [BLOB handles], and/or -** [sqlite3_backup] objects then it returns [SQLITE_OK] and the deallocation -** of resources is deferred until all [prepared statements], [BLOB handles], -** and [sqlite3_backup] objects are also destroyed. -** -** ^If an [sqlite3] object is destroyed while a transaction is open, -** the transaction is automatically rolled back. -** -** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)] -** must be either a NULL -** pointer or an [sqlite3] object pointer obtained -** from [sqlite3_open()], [sqlite3_open16()], or -** [sqlite3_open_v2()], and not previously closed. -** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer -** argument is a harmless no-op. -*/ -SQLITE_API int sqlite3_close(sqlite3*); -SQLITE_API int sqlite3_close_v2(sqlite3*); - -/* -** The type for a callback function. -** This is legacy and deprecated. It is included for historical -** compatibility and is not documented. -*/ -typedef int (*sqlite3_callback)(void*,int,char**, char**); - -/* -** CAPI3REF: One-Step Query Execution Interface -** METHOD: sqlite3 -** -** The sqlite3_exec() interface is a convenience wrapper around -** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], -** that allows an application to run multiple statements of SQL -** without having to use a lot of C code. -** -** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, -** in the context of the [database connection] passed in as its 1st -** argument. ^If the callback function of the 3rd argument to -** sqlite3_exec() is not NULL, then it is invoked for each result row -** coming out of the evaluated SQL statements. ^The 4th argument to -** sqlite3_exec() is relayed through to the 1st argument of each -** callback invocation. ^If the callback pointer to sqlite3_exec() -** is NULL, then no callback is ever invoked and result rows are -** ignored. -** -** ^If an error occurs while evaluating the SQL statements passed into -** sqlite3_exec(), then execution of the current statement stops and -** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() -** is not NULL then any error message is written into memory obtained -** from [sqlite3_malloc()] and passed back through the 5th parameter. -** To avoid memory leaks, the application should invoke [sqlite3_free()] -** on error message strings returned through the 5th parameter of -** sqlite3_exec() after the error message string is no longer needed. -** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors -** occur, then sqlite3_exec() sets the pointer in its 5th parameter to -** NULL before returning. -** -** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec() -** routine returns SQLITE_ABORT without invoking the callback again and -** without running any subsequent SQL statements. 
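
The project consumes SQLite through database/sql rather than through this C entry point. As a rough Go-side analogue of the row-at-a-time callback described here (a hedged sketch only; the table, columns, and data are made up for illustration), each loop iteration below plays the part of one callback invocation, and returning early from the loop is the moral equivalent of the callback returning non-zero:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	db.SetMaxOpenConns(1) // keep the single in-memory database on one connection

	if _, err := db.Exec("CREATE TABLE msgs (id INTEGER PRIMARY KEY, body TEXT)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("INSERT INTO msgs (body) VALUES ('hello'), ('world')"); err != nil {
		log.Fatal(err)
	}

	rows, err := db.Query("SELECT id, body FROM msgs")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// One iteration per result row, with the values already decoded into
	// the scanned Go types.
	for rows.Next() {
		var id int
		var body string
		if err := rows.Scan(&id, &body); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, body)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
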
-** -** ^The 2nd argument to the sqlite3_exec() callback function is the -** number of columns in the result. ^The 3rd argument to the sqlite3_exec() -** callback is an array of pointers to strings obtained as if from -** [sqlite3_column_text()], one for each column. ^If an element of a -** result row is NULL then the corresponding string pointer for the -** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the -** sqlite3_exec() callback is an array of pointers to strings where each -** entry represents the name of corresponding result column as obtained -** from [sqlite3_column_name()]. -** -** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer -** to an empty string, or a pointer that contains only whitespace and/or -** SQL comments, then no SQL statements are evaluated and the database -** is not changed. -** -** Restrictions: -** -**
-** <ul>
-** <li> The application must ensure that the 1st parameter to sqlite3_exec()
-**      is a valid and open [database connection].
-** <li> The application must not close the [database connection] specified by
-**      the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
-** <li> The application must not modify the SQL statement text passed into
-**      the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
-** </ul>
-*/ -SQLITE_API int sqlite3_exec( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be evaluated */ - int (*callback)(void*,int,char**,char**), /* Callback function */ - void *, /* 1st argument to callback */ - char **errmsg /* Error msg written here */ -); - -/* -** CAPI3REF: Result Codes -** KEYWORDS: {result code definitions} -** -** Many SQLite functions return an integer result code from the set shown -** here in order to indicate success or failure. -** -** New error codes may be added in future versions of SQLite. -** -** See also: [extended result code definitions] -*/ -#define SQLITE_OK 0 /* Successful result */ -/* beginning-of-error-codes */ -#define SQLITE_ERROR 1 /* SQL error or missing database */ -#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ -#define SQLITE_PERM 3 /* Access permission denied */ -#define SQLITE_ABORT 4 /* Callback routine requested an abort */ -#define SQLITE_BUSY 5 /* The database file is locked */ -#define SQLITE_LOCKED 6 /* A table in the database is locked */ -#define SQLITE_NOMEM 7 /* A malloc() failed */ -#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ -#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ -#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ -#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ -#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ -#define SQLITE_FULL 13 /* Insertion failed because database is full */ -#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ -#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ -#define SQLITE_EMPTY 16 /* Database is empty */ -#define SQLITE_SCHEMA 17 /* The database schema changed */ -#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ -#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ -#define SQLITE_MISMATCH 20 /* Data type mismatch */ -#define SQLITE_MISUSE 21 /* Library used incorrectly */ -#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ -#define SQLITE_AUTH 23 /* Authorization denied */ -#define SQLITE_FORMAT 24 /* Auxiliary database format error */ -#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ -#define SQLITE_NOTADB 26 /* File opened that is not a database file */ -#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ -#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ -#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ -#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ -/* end-of-error-codes */ - -/* -** CAPI3REF: Extended Result Codes -** KEYWORDS: {extended result code definitions} -** -** In its default configuration, SQLite API routines return one of 30 integer -** [result codes]. However, experience has shown that many of -** these result codes are too coarse-grained. They do not provide as -** much information about problems as programmers might like. In an effort to -** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8] -** and later) include -** support for additional result codes that provide more detailed information -** about errors. These [extended result codes] are enabled or disabled -** on a per database connection basis using the -** [sqlite3_extended_result_codes()] API. Or, the extended code for -** the most recent error can be obtained using -** [sqlite3_extended_errcode()]. 
-*/ -#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) -#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) -#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) -#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) -#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) -#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) -#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) -#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) -#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) -#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) -#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) -#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) -#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) -#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) -#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) -#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) -#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) -#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) -#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) -#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) -#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) -#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) -#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) -#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) -#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) -#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) -#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8)) -#define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8)) -#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) -#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) -#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) -#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) -#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) -#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) -#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) -#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) -#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) -#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) -#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) -#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) -#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) -#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) -#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) -#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) -#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) -#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) -#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) -#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) -#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) -#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) -#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) -#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) -#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) -#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) -#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) -#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) - -/* -** CAPI3REF: Flags For File Open Operations -** -** These bit values are intended for use in the -** 3rd parameter to the [sqlite3_open_v2()] interface and -** in the 4th parameter to the [sqlite3_vfs.xOpen] method. 
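
From the Go side these open flags are normally selected through the data source name rather than passed explicitly: go-sqlite3 accepts SQLite URI filenames, so query parameters such as mode= and cache= end up selecting the corresponding SQLITE_OPEN_* behaviour. A small, hedged sketch; the file name is illustrative and the exact parameter handling belongs to the driver, not to this repository:

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// mode=rwc roughly corresponds to SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
	// which is also the driver's default; spelled out here only for illustration.
	rw, err := sql.Open("sqlite3", "file:example.db?mode=rwc")
	if err != nil {
		log.Fatal(err)
	}
	defer rw.Close()
	if _, err := rw.Exec("CREATE TABLE IF NOT EXISTS t (x INTEGER)"); err != nil {
		log.Fatal(err)
	}

	// mode=ro corresponds to SQLITE_OPEN_READONLY: writes on this handle
	// should fail with an SQLITE_READONLY-family error.
	ro, err := sql.Open("sqlite3", "file:example.db?mode=ro")
	if err != nil {
		log.Fatal(err)
	}
	defer ro.Close()
	if _, err := ro.Exec("INSERT INTO t (x) VALUES (1)"); err != nil {
		log.Printf("write on read-only handle failed as expected: %v", err)
	}
}
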
-*/ -#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ -#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ -#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ -#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ -#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ -#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ -#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ -#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ -#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ -#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ -#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ - -/* Reserved: 0x00F00000 */ - -/* -** CAPI3REF: Device Characteristics -** -** The xDeviceCharacteristics method of the [sqlite3_io_methods] -** object returns an integer which is a vector of these -** bit values expressing I/O characteristics of the mass storage -** device that holds the file that the [sqlite3_io_methods] -** refers to. -** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that -** after reboot following a crash or power loss, the only bytes in a -** file that were written at the application level might have changed -** and that adjacent bytes, even bytes within the same sector are -** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN -** flag indicates that a file cannot be deleted when open. The -** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on -** read-only media and cannot be changed even by processes with -** elevated privileges. 
-*/ -#define SQLITE_IOCAP_ATOMIC 0x00000001 -#define SQLITE_IOCAP_ATOMIC512 0x00000002 -#define SQLITE_IOCAP_ATOMIC1K 0x00000004 -#define SQLITE_IOCAP_ATOMIC2K 0x00000008 -#define SQLITE_IOCAP_ATOMIC4K 0x00000010 -#define SQLITE_IOCAP_ATOMIC8K 0x00000020 -#define SQLITE_IOCAP_ATOMIC16K 0x00000040 -#define SQLITE_IOCAP_ATOMIC32K 0x00000080 -#define SQLITE_IOCAP_ATOMIC64K 0x00000100 -#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 -#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 -#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 -#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 -#define SQLITE_IOCAP_IMMUTABLE 0x00002000 - -/* -** CAPI3REF: File Locking Levels -** -** SQLite uses one of these integer values as the second -** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. -*/ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 - -/* -** CAPI3REF: Synchronization Type Flags -** -** When SQLite invokes the xSync() method of an -** [sqlite3_io_methods] object it uses a combination of -** these integer values as the second argument. -** -** When the SQLITE_SYNC_DATAONLY flag is used, it means that the -** sync operation only needs to flush data to mass storage. Inode -** information need not be flushed. If the lower four bits of the flag -** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. -** If the lower four bits equal SQLITE_SYNC_FULL, that means -** to use Mac OS X style fullsync instead of fsync(). -** -** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags -** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL -** settings. The [synchronous pragma] determines when calls to the -** xSync VFS method occur and applies uniformly across all platforms. -** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how -** energetic or rigorous or forceful the sync operations are and -** only make a difference on Mac OSX for the default SQLite code. -** (Third-party VFS implementations might also make the distinction -** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the -** operating systems natively supported by SQLite, only Mac OSX -** cares about the difference.) -*/ -#define SQLITE_SYNC_NORMAL 0x00002 -#define SQLITE_SYNC_FULL 0x00003 -#define SQLITE_SYNC_DATAONLY 0x00010 - -/* -** CAPI3REF: OS Interface Open File Handle -** -** An [sqlite3_file] object represents an open file in the -** [sqlite3_vfs | OS interface layer]. Individual OS interface -** implementations will -** want to subclass this object by appending additional fields -** for their own use. The pMethods entry is a pointer to an -** [sqlite3_io_methods] object that defines methods for performing -** I/O operations on the open file. -*/ -typedef struct sqlite3_file sqlite3_file; -struct sqlite3_file { - const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ -}; - -/* -** CAPI3REF: OS Interface File Virtual Methods Object -** -** Every file opened by the [sqlite3_vfs.xOpen] method populates an -** [sqlite3_file] object (or, more commonly, a subclass of the -** [sqlite3_file] object) with a pointer to an instance of this object. -** This object defines the methods used to perform various operations -** against the open file represented by the [sqlite3_file] object. 
-** -** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element -** to a non-NULL pointer, then the sqlite3_io_methods.xClose method -** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The -** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] -** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element -** to NULL. -** -** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or -** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). -** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] -** flag may be ORed in to indicate that only the data of the file -** and not its inode needs to be synced. -** -** The integer values to xLock() and xUnlock() are one of -**
-** <ul>
-** <li> [SQLITE_LOCK_NONE],
-** <li> [SQLITE_LOCK_SHARED],
-** <li> [SQLITE_LOCK_RESERVED],
-** <li> [SQLITE_LOCK_PENDING], or
-** <li> [SQLITE_LOCK_EXCLUSIVE].
-** </ul>
-** xLock() increases the lock. xUnlock() decreases the lock. -** The xCheckReservedLock() method checks whether any database connection, -** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. -** -** The xFileControl() method is a generic interface that allows custom -** VFS implementations to directly control an open file using the -** [sqlite3_file_control()] interface. The second "op" argument is an -** integer opcode. The third argument is a generic pointer intended to -** point to a structure that may contain arguments or space in which to -** write return values. Potential uses for xFileControl() might be -** functions to enable blocking locks with timeouts, to change the -** locking strategy (for example to use dot-file locks), to inquire -** about the status of a lock, or to break stale locks. The SQLite -** core reserves all opcodes less than 100 for its own use. -** A [file control opcodes | list of opcodes] less than 100 is available. -** Applications that define a custom xFileControl method should use opcodes -** greater than 100 to avoid conflicts. VFS implementations should -** return [SQLITE_NOTFOUND] for file control opcodes that they do not -** recognize. -** -** The xSectorSize() method returns the sector size of the -** device that underlies the file. The sector size is the -** minimum write that can be performed without disturbing -** other bytes in the file. The xDeviceCharacteristics() -** method returns a bit vector describing behaviors of the -** underlying device: -** -**
-** <ul>
-** <li> [SQLITE_IOCAP_ATOMIC]
-** <li> [SQLITE_IOCAP_ATOMIC512]
-** <li> [SQLITE_IOCAP_ATOMIC1K]
-** <li> [SQLITE_IOCAP_ATOMIC2K]
-** <li> [SQLITE_IOCAP_ATOMIC4K]
-** <li> [SQLITE_IOCAP_ATOMIC8K]
-** <li> [SQLITE_IOCAP_ATOMIC16K]
-** <li> [SQLITE_IOCAP_ATOMIC32K]
-** <li> [SQLITE_IOCAP_ATOMIC64K]
-** <li> [SQLITE_IOCAP_SAFE_APPEND]
-** <li> [SQLITE_IOCAP_SEQUENTIAL]
-** <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
-** <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
-** <li> [SQLITE_IOCAP_IMMUTABLE]
-** </ul>
-** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). -** -** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill -** in the unread portions of the buffer with zeros. A VFS that -** fails to zero-fill short reads might seem to work. However, -** failure to zero-fill short reads will eventually lead to -** database corruption. -*/ -typedef struct sqlite3_io_methods sqlite3_io_methods; -struct sqlite3_io_methods { - int iVersion; - int (*xClose)(sqlite3_file*); - int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); - int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); - int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); - int (*xSync)(sqlite3_file*, int flags); - int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); - int (*xLock)(sqlite3_file*, int); - int (*xUnlock)(sqlite3_file*, int); - int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); - int (*xFileControl)(sqlite3_file*, int op, void *pArg); - int (*xSectorSize)(sqlite3_file*); - int (*xDeviceCharacteristics)(sqlite3_file*); - /* Methods above are valid for version 1 */ - int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); - int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); - void (*xShmBarrier)(sqlite3_file*); - int (*xShmUnmap)(sqlite3_file*, int deleteFlag); - /* Methods above are valid for version 2 */ - int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); - int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p); - /* Methods above are valid for version 3 */ - /* Additional methods may be added in future releases */ -}; - -/* -** CAPI3REF: Standard File Control Opcodes -** KEYWORDS: {file control opcodes} {file control opcode} -** -** These integer constants are opcodes for the xFileControl method -** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] -** interface. -** -**
    -**
  • [[SQLITE_FCNTL_LOCKSTATE]] -** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This -** opcode causes the xFileControl method to write the current state of -** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], -** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and is only available when the SQLITE_TEST -** compile-time option is used. -** -**
  • [[SQLITE_FCNTL_SIZE_HINT]] -** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS -** layer a hint of how large the database file will grow to be during the -** current transaction. This hint is not guaranteed to be accurate but it -** is often close. The underlying VFS might choose to preallocate database -** file space based on this hint in order to help writes to the database -** file run faster. -** -**
  • [[SQLITE_FCNTL_CHUNK_SIZE]] -** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS -** extends and truncates the database file in chunks of a size specified -** by the user. The fourth argument to [sqlite3_file_control()] should -** point to an integer (type int) containing the new chunk-size to use -** for the nominated database. Allocating database file space in large -** chunks (say 1MB at a time), may reduce file-system fragmentation and -** improve performance on some systems. -** -**
  • [[SQLITE_FCNTL_FILE_POINTER]] -** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer -** to the [sqlite3_file] object associated with a particular database -** connection. See also [SQLITE_FCNTL_JOURNAL_POINTER]. -** -**
  • [[SQLITE_FCNTL_JOURNAL_POINTER]] -** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer -** to the [sqlite3_file] object associated with the journal file (either -** the [rollback journal] or the [write-ahead log]) for a particular database -** connection. See also [SQLITE_FCNTL_FILE_POINTER]. -** -**
  • [[SQLITE_FCNTL_SYNC_OMITTED]] -** No longer in use. -** -**
  • [[SQLITE_FCNTL_SYNC]] -** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and -** sent to the VFS immediately before the xSync method is invoked on a -** database file descriptor. Or, if the xSync method is not invoked -** because the user has configured SQLite with -** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place -** of the xSync method. In most cases, the pointer argument passed with -** this file-control is NULL. However, if the database file is being synced -** as part of a multi-database commit, the argument points to a nul-terminated -** string containing the transactions master-journal file name. VFSes that -** do not need this signal should silently ignore this opcode. Applications -** should not call [sqlite3_file_control()] with this opcode as doing so may -** disrupt the operation of the specialized VFSes that do require it. -** -**
  • [[SQLITE_FCNTL_COMMIT_PHASETWO]] -** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite -** and sent to the VFS after a transaction has been committed immediately -** but before the database is unlocked. VFSes that do not need this signal -** should silently ignore this opcode. Applications should not call -** [sqlite3_file_control()] with this opcode as doing so may disrupt the -** operation of the specialized VFSes that do require it. -** -**
  • [[SQLITE_FCNTL_WIN32_AV_RETRY]] -** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic -** retry counts and intervals for certain disk I/O operations for the -** windows [VFS] in order to provide robustness in the presence of -** anti-virus programs. By default, the windows VFS will retry file read, -** file write, and file delete operations up to 10 times, with a delay -** of 25 milliseconds before the first retry and with the delay increasing -** by an additional 25 milliseconds with each subsequent retry. This -** opcode allows these two values (10 retries and 25 milliseconds of delay) -** to be adjusted. The values are changed for all database connections -** within the same process. The argument is a pointer to an array of two -** integers where the first integer i the new retry count and the second -** integer is the delay. If either integer is negative, then the setting -** is not changed but instead the prior value of that setting is written -** into the array entry, allowing the current retry settings to be -** interrogated. The zDbName parameter is ignored. -** -**
  • [[SQLITE_FCNTL_PERSIST_WAL]] -** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the -** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary -** write ahead log and shared memory files used for transaction control -** are automatically deleted when the latest connection to the database -** closes. Setting persistent WAL mode causes those files to persist after -** close. Persisting the files is useful when other processes that do not -** have write permission on the directory containing the database file want -** to read the database file, as the WAL and shared memory files must exist -** in order for the database to be readable. The fourth parameter to -** [sqlite3_file_control()] for this opcode should be a pointer to an integer. -** That integer is 0 to disable persistent WAL mode or 1 to enable persistent -** WAL mode. If the integer is -1, then it is overwritten with the current -** WAL persistence setting. -** -**
  • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]] -** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the -** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting -** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the -** xDeviceCharacteristics methods. The fourth parameter to -** [sqlite3_file_control()] for this opcode should be a pointer to an integer. -** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage -** mode. If the integer is -1, then it is overwritten with the current -** zero-damage mode setting. -** -**
  • [[SQLITE_FCNTL_OVERWRITE]] -** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening -** a write transaction to indicate that, unless it is rolled back for some -** reason, the entire database file will be overwritten by the current -** transaction. This is used by VACUUM operations. -** -**
  • [[SQLITE_FCNTL_VFSNAME]] -** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of -** all [VFSes] in the VFS stack. The names are of all VFS shims and the -** final bottom-level VFS are written into memory obtained from -** [sqlite3_malloc()] and the result is stored in the char* variable -** that the fourth parameter of [sqlite3_file_control()] points to. -** The caller is responsible for freeing the memory when done. As with -** all file-control actions, there is no guarantee that this will actually -** do anything. Callers should initialize the char* variable to a NULL -** pointer in case this file-control is not implemented. This file-control -** is intended for diagnostic use only. -** -**
  • [[SQLITE_FCNTL_VFS_POINTER]] -** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level -** [VFSes] currently in use. ^(The argument X in -** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be -** of type "[sqlite3_vfs] **". This opcodes will set *X -** to a pointer to the top-level VFS.)^ -** ^When there are multiple VFS shims in the stack, this opcode finds the -** upper-most shim only. -** -**
  • [[SQLITE_FCNTL_PRAGMA]] -** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] -** file control is sent to the open [sqlite3_file] object corresponding -** to the database file to which the pragma statement refers. ^The argument -** to the [SQLITE_FCNTL_PRAGMA] file control is an array of -** pointers to strings (char**) in which the second element of the array -** is the name of the pragma and the third element is the argument to the -** pragma or NULL if the pragma has no argument. ^The handler for an -** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element -** of the char** argument point to a string obtained from [sqlite3_mprintf()] -** or the equivalent and that string will become the result of the pragma or -** the error message if the pragma fails. ^If the -** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal -** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA] -** file control returns [SQLITE_OK], then the parser assumes that the -** VFS has handled the PRAGMA itself and the parser generates a no-op -** prepared statement if result string is NULL, or that returns a copy -** of the result string if the string is non-NULL. -** ^If the [SQLITE_FCNTL_PRAGMA] file control returns -** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means -** that the VFS encountered an error while handling the [PRAGMA] and the -** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA] -** file control occurs at the beginning of pragma statement analysis and so -** it is able to override built-in [PRAGMA] statements. -** -**
  • [[SQLITE_FCNTL_BUSYHANDLER]] -** ^The [SQLITE_FCNTL_BUSYHANDLER] -** file-control may be invoked by SQLite on the database file handle -** shortly after it is opened in order to provide a custom VFS with access -** to the connection's busy-handler callback. The argument is of type (void **) -** - an array of two (void *) values. The first (void *) actually points -** to a function of type (int (*)(void *)). In order to invoke the connection's -** busy-handler, this function should be invoked with the second (void *) in -** the array as the only argument. If it returns non-zero, then the operation -** should be retried. If it returns zero, the custom VFS should abandon the -** current operation. -** -**
  • [[SQLITE_FCNTL_TEMPFILENAME]] -** ^Applications can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control -** to have SQLite generate a -** temporary filename using the same algorithm that is followed to generate -** temporary filenames for TEMP tables and other internal uses. The -** argument should be a char** which will be filled with the filename -** written into memory obtained from [sqlite3_malloc()]. The caller should -** invoke [sqlite3_free()] on the result to avoid a memory leak. -** -**
  • [[SQLITE_FCNTL_MMAP_SIZE]] -** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the -** maximum number of bytes that will be used for memory-mapped I/O. -** The argument is a pointer to a value of type sqlite3_int64 that -** is an advisory maximum number of bytes in the file to memory map. The -** pointer is overwritten with the old value. The limit is not changed if -** the value originally pointed to is negative, and so the current limit -** can be queried by passing in a pointer to a negative number. This -** file-control is used internally to implement [PRAGMA mmap_size]. -** -**
  • [[SQLITE_FCNTL_TRACE]] -** The [SQLITE_FCNTL_TRACE] file control provides advisory information -** to the VFS about what the higher layers of the SQLite stack are doing. -** This file control is used by some VFS activity tracing [shims]. -** The argument is a zero-terminated string. Higher layers in the -** SQLite stack may generate instances of this file control if -** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled. -** -**
  • [[SQLITE_FCNTL_HAS_MOVED]] -** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a -** pointer to an integer and it writes a boolean into that integer depending -** on whether or not the file has been renamed, moved, or deleted since it -** was first opened. -** -**
  • [[SQLITE_FCNTL_WIN32_GET_HANDLE]] -** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the -** underlying native file handle associated with a file handle. This file -** control interprets its argument as a pointer to a native file handle and -** writes the resulting value there. -** -**
  • [[SQLITE_FCNTL_WIN32_SET_HANDLE]] -** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This -** opcode causes the xFileControl method to swap the file handle with the one -** pointed to by the pArg argument. This capability is used during testing -** and only needs to be supported when SQLITE_TEST is defined. -** -**
  • [[SQLITE_FCNTL_WAL_BLOCK]] -** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might -** be advantageous to block on the next WAL lock if the lock is not immediately -** available. The WAL subsystem issues this signal during rare -** circumstances in order to fix a problem with priority inversion. -** Applications should not use this file-control. -** -**
  • [[SQLITE_FCNTL_ZIPVFS]] -** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other -** VFS should return SQLITE_NOTFOUND for this opcode. -** -**
  • [[SQLITE_FCNTL_RBU]] -** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by -** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for -** this opcode. -**
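All of the file controls above are issued through sqlite3_file_control(). A minimal sketch in C of one such call, querying the VFS name stack with SQLITE_FCNTL_VFSNAME; the filename "example.db" is purely illustrative:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void) {
      sqlite3 *db = 0;
      if (sqlite3_open("example.db", &db) != SQLITE_OK) return 1;

      /* Initialize to NULL in case the file control is not implemented. */
      char *zVfsName = 0;
      int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfsName);
      if (rc == SQLITE_OK && zVfsName) {
        printf("VFS stack: %s\n", zVfsName);
        sqlite3_free(zVfsName);   /* the caller frees the sqlite3_malloc()'d string */
      }
      sqlite3_close(db);
      return 0;
    }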
-*/ -#define SQLITE_FCNTL_LOCKSTATE 1 -#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2 -#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3 -#define SQLITE_FCNTL_LAST_ERRNO 4 -#define SQLITE_FCNTL_SIZE_HINT 5 -#define SQLITE_FCNTL_CHUNK_SIZE 6 -#define SQLITE_FCNTL_FILE_POINTER 7 -#define SQLITE_FCNTL_SYNC_OMITTED 8 -#define SQLITE_FCNTL_WIN32_AV_RETRY 9 -#define SQLITE_FCNTL_PERSIST_WAL 10 -#define SQLITE_FCNTL_OVERWRITE 11 -#define SQLITE_FCNTL_VFSNAME 12 -#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13 -#define SQLITE_FCNTL_PRAGMA 14 -#define SQLITE_FCNTL_BUSYHANDLER 15 -#define SQLITE_FCNTL_TEMPFILENAME 16 -#define SQLITE_FCNTL_MMAP_SIZE 18 -#define SQLITE_FCNTL_TRACE 19 -#define SQLITE_FCNTL_HAS_MOVED 20 -#define SQLITE_FCNTL_SYNC 21 -#define SQLITE_FCNTL_COMMIT_PHASETWO 22 -#define SQLITE_FCNTL_WIN32_SET_HANDLE 23 -#define SQLITE_FCNTL_WAL_BLOCK 24 -#define SQLITE_FCNTL_ZIPVFS 25 -#define SQLITE_FCNTL_RBU 26 -#define SQLITE_FCNTL_VFS_POINTER 27 -#define SQLITE_FCNTL_JOURNAL_POINTER 28 -#define SQLITE_FCNTL_WIN32_GET_HANDLE 29 -#define SQLITE_FCNTL_PDB 30 - -/* deprecated names */ -#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE -#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE -#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO - - -/* -** CAPI3REF: Mutex Handle -** -** The mutex module within SQLite defines [sqlite3_mutex] to be an -** abstract type for a mutex object. The SQLite core never looks -** at the internal representation of an [sqlite3_mutex]. It only -** deals with pointers to the [sqlite3_mutex] object. -** -** Mutexes are created using [sqlite3_mutex_alloc()]. -*/ -typedef struct sqlite3_mutex sqlite3_mutex; - -/* -** CAPI3REF: Loadable Extension Thunk -** -** A pointer to the opaque sqlite3_api_routines structure is passed as -** the third parameter to entry points of [loadable extensions]. This -** structure must be typedefed in order to work around compiler warnings -** on some platforms. -*/ -typedef struct sqlite3_api_routines sqlite3_api_routines; - -/* -** CAPI3REF: OS Interface Object -** -** An instance of the sqlite3_vfs object defines the interface between -** the SQLite core and the underlying operating system. The "vfs" -** in the name of the object stands for "virtual file system". See -** the [VFS | VFS documentation] for further information. -** -** The value of the iVersion field is initially 1 but may be larger in -** future versions of SQLite. Additional fields may be appended to this -** object when the iVersion value is increased. Note that the structure -** of the sqlite3_vfs object changes in the transaction between -** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not -** modified. -** -** The szOsFile field is the size of the subclassed [sqlite3_file] -** structure used by this VFS. mxPathname is the maximum length of -** a pathname in this VFS. -** -** Registered sqlite3_vfs objects are kept on a linked list formed by -** the pNext pointer. The [sqlite3_vfs_register()] -** and [sqlite3_vfs_unregister()] interfaces manage this list -** in a thread-safe way. The [sqlite3_vfs_find()] interface -** searches the list. Neither the application code nor the VFS -** implementation should use the pNext pointer. -** -** The pNext field is the only field in the sqlite3_vfs -** structure that SQLite will ever modify. SQLite will only access -** or modify this field while holding a particular static mutex. -** The application should never modify anything within the sqlite3_vfs -** object once the object has been registered. 
-** -** The zName field holds the name of the VFS module. The name must -** be unique across all VFS modules. -** -** [[sqlite3_vfs.xOpen]] -** ^SQLite guarantees that the zFilename parameter to xOpen -** is either a NULL pointer or string obtained -** from xFullPathname() with an optional suffix added. -** ^If a suffix is added to the zFilename parameter, it will -** consist of a single "-" character followed by no more than -** 11 alphanumeric and/or "-" characters. -** ^SQLite further guarantees that -** the string will be valid and unchanged until xClose() is -** called. Because of the previous sentence, -** the [sqlite3_file] can safely store a pointer to the -** filename if it needs to remember the filename for some reason. -** If the zFilename parameter to xOpen is a NULL pointer then xOpen -** must invent its own temporary name for the file. ^Whenever the -** xFilename parameter is NULL it will also be the case that the -** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. -** -** The flags argument to xOpen() includes all bits set in -** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()] -** or [sqlite3_open16()] is used, then flags includes at least -** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. -** If xOpen() opens a file read-only then it sets *pOutFlags to -** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. -** -** ^(SQLite will also add one of the following flags to the xOpen() -** call, depending on the object being opened: -** -**
    -**
  • [SQLITE_OPEN_MAIN_DB] -**
  • [SQLITE_OPEN_MAIN_JOURNAL] -**
  • [SQLITE_OPEN_TEMP_DB] -**
  • [SQLITE_OPEN_TEMP_JOURNAL] -**
  • [SQLITE_OPEN_TRANSIENT_DB] -**
  • [SQLITE_OPEN_SUBJOURNAL] -**
  • [SQLITE_OPEN_MASTER_JOURNAL] -**
  • [SQLITE_OPEN_WAL] -**
)^ -** -** The file I/O implementation can use the object type flags to -** change the way it deals with files. For example, an application -** that does not care about crash recovery or rollback might make -** the open of a journal file a no-op. Writes to this journal would -** also be no-ops, and any attempt to read the journal would return -** SQLITE_IOERR. Or the implementation might recognize that a database -** file will be doing page-aligned sector reads and writes in a random -** order and set up its I/O subsystem accordingly. -** -** SQLite might also add one of the following flags to the xOpen method: -** -**
    -**
  • [SQLITE_OPEN_DELETEONCLOSE] -**
  • [SQLITE_OPEN_EXCLUSIVE] -**
-** -** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be -** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] -** will be set for TEMP databases and their journals, transient -** databases, and subjournals. -** -** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction -** with the [SQLITE_OPEN_CREATE] flag, which are both directly -** analogous to the O_EXCL and O_CREAT flags of the POSIX open() -** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the -** SQLITE_OPEN_CREATE, is used to indicate that file should always -** be created, and that it is an error if it already exists. -** It is not used to indicate the file should be opened -** for exclusive access. -** -** ^At least szOsFile bytes of memory are allocated by SQLite -** to hold the [sqlite3_file] structure passed as the third -** argument to xOpen. The xOpen method does not have to -** allocate the structure; it should just fill it in. Note that -** the xOpen method must set the sqlite3_file.pMethods to either -** a valid [sqlite3_io_methods] object or to NULL. xOpen must do -** this even if the open fails. SQLite expects that the sqlite3_file.pMethods -** element will be valid after xOpen returns regardless of the success -** or failure of the xOpen call. -** -** [[sqlite3_vfs.xAccess]] -** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] -** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to -** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] -** to test whether a file is at least readable. The file can be a -** directory. -** -** ^SQLite will always allocate at least mxPathname+1 bytes for the -** output buffer xFullPathname. The exact size of the output buffer -** is also passed as a parameter to both methods. If the output buffer -** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is -** handled as a fatal error by SQLite, vfs implementations should endeavor -** to prevent this by setting mxPathname to a sufficiently large value. -** -** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() -** interfaces are not strictly a part of the filesystem, but they are -** included in the VFS structure for completeness. -** The xRandomness() function attempts to return nBytes bytes -** of good-quality randomness into zOut. The return value is -** the actual number of bytes of randomness obtained. -** The xSleep() method causes the calling thread to sleep for at -** least the number of microseconds given. ^The xCurrentTime() -** method returns a Julian Day Number for the current date and time as -** a floating point value. -** ^The xCurrentTimeInt64() method returns, as an integer, the Julian -** Day Number multiplied by 86400000 (the number of milliseconds in -** a 24-hour day). -** ^SQLite will use the xCurrentTimeInt64() method to get the current -** date and time if that method is available (if iVersion is 2 or -** greater and the function pointer is not NULL) and will fall back -** to xCurrentTime() if xCurrentTimeInt64() is unavailable. -** -** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces -** are not used by the SQLite core. These optional interfaces are provided -** by some VFSes to facilitate testing of the VFS code. By overriding -** system calls with functions under its control, a test program can -** simulate faults and error conditions that would otherwise be difficult -** or impossible to induce. 
The set of system calls that can be overridden -** varies from one VFS to another, and from one version of the same VFS to the -** next. Applications that use these interfaces must be prepared for any -** or all of these interfaces to be NULL or for their behavior to change -** from one release to the next. Applications must not attempt to access -** any of these methods if the iVersion of the VFS is less than 3. -*/ -typedef struct sqlite3_vfs sqlite3_vfs; -typedef void (*sqlite3_syscall_ptr)(void); -struct sqlite3_vfs { - int iVersion; /* Structure version number (currently 3) */ - int szOsFile; /* Size of subclassed sqlite3_file */ - int mxPathname; /* Maximum file pathname length */ - sqlite3_vfs *pNext; /* Next registered VFS */ - const char *zName; /* Name of this virtual file system */ - void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, - int flags, int *pOutFlags); - int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); - int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); - int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); - void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); - void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); - void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); - void (*xDlClose)(sqlite3_vfs*, void*); - int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); - int (*xSleep)(sqlite3_vfs*, int microseconds); - int (*xCurrentTime)(sqlite3_vfs*, double*); - int (*xGetLastError)(sqlite3_vfs*, int, char *); - /* - ** The methods above are in version 1 of the sqlite_vfs object - ** definition. Those that follow are added in version 2 or later - */ - int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); - /* - ** The methods above are in versions 1 and 2 of the sqlite_vfs object. - ** Those below are for version 3 and greater. - */ - int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr); - sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName); - const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName); - /* - ** The methods above are in versions 1 through 3 of the sqlite_vfs object. - ** New fields may be appended in future versions. The iVersion - ** value will increment whenever this happens. - */ -}; - -/* -** CAPI3REF: Flags for the xAccess VFS method -** -** These integer constants can be used as the third parameter to -** the xAccess method of an [sqlite3_vfs] object. They determine -** what kind of permissions the xAccess method is looking for. -** With SQLITE_ACCESS_EXISTS, the xAccess method -** simply checks whether the file exists. -** With SQLITE_ACCESS_READWRITE, the xAccess method -** checks whether the named directory is both readable and writable -** (in other words, if files can be added, removed, and renamed within -** the directory). -** The SQLITE_ACCESS_READWRITE constant is currently used only by the -** [temp_store_directory pragma], though this could change in a future -** release of SQLite. -** With SQLITE_ACCESS_READ, the xAccess method -** checks whether the file is readable. The SQLITE_ACCESS_READ constant is -** currently unused, though it might be used in a future release of -** SQLite. 
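A minimal sketch of driving the VFS layer directly from application code: look up the default VFS with sqlite3_vfs_find() and call its xAccess method to test whether a file exists. The relative path "example.db" is only for illustration; a real caller would normally run the name through xFullPathname() first:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void) {
      sqlite3_vfs *pVfs = sqlite3_vfs_find(0);  /* 0 selects the default VFS */
      if (pVfs == 0) return 1;

      int exists = 0;
      int rc = pVfs->xAccess(pVfs, "example.db", SQLITE_ACCESS_EXISTS, &exists);
      if (rc == SQLITE_OK) {
        printf("VFS \"%s\": example.db %s\n", pVfs->zName,
               exists ? "exists" : "does not exist");
      }
      return 0;
    }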
-*/ -#define SQLITE_ACCESS_EXISTS 0 -#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ -#define SQLITE_ACCESS_READ 2 /* Unused */ - -/* -** CAPI3REF: Flags for the xShmLock VFS method -** -** These integer constants define the various locking operations -** allowed by the xShmLock method of [sqlite3_io_methods]. The -** following are the only legal combinations of flags to the -** xShmLock method: -** -**
    -**
  • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED -**
  • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE -**
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED -**
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE -**
-** -** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as -** was given on the corresponding lock. -** -** The xShmLock method can transition between unlocked and SHARED or -** between unlocked and EXCLUSIVE. It cannot transition between SHARED -** and EXCLUSIVE. -*/ -#define SQLITE_SHM_UNLOCK 1 -#define SQLITE_SHM_LOCK 2 -#define SQLITE_SHM_SHARED 4 -#define SQLITE_SHM_EXCLUSIVE 8 - -/* -** CAPI3REF: Maximum xShmLock index -** -** The xShmLock method on [sqlite3_io_methods] may use values -** between 0 and this upper bound as its "offset" argument. -** The SQLite core will never attempt to acquire or release a -** lock outside of this range -*/ -#define SQLITE_SHM_NLOCK 8 - - -/* -** CAPI3REF: Initialize The SQLite Library -** -** ^The sqlite3_initialize() routine initializes the -** SQLite library. ^The sqlite3_shutdown() routine -** deallocates any resources that were allocated by sqlite3_initialize(). -** These routines are designed to aid in process initialization and -** shutdown on embedded systems. Workstation applications using -** SQLite normally do not need to invoke either of these routines. -** -** A call to sqlite3_initialize() is an "effective" call if it is -** the first time sqlite3_initialize() is invoked during the lifetime of -** the process, or if it is the first time sqlite3_initialize() is invoked -** following a call to sqlite3_shutdown(). ^(Only an effective call -** of sqlite3_initialize() does any initialization. All other calls -** are harmless no-ops.)^ -** -** A call to sqlite3_shutdown() is an "effective" call if it is the first -** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only -** an effective call to sqlite3_shutdown() does any deinitialization. -** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^ -** -** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown() -** is not. The sqlite3_shutdown() interface must only be called from a -** single thread. All open [database connections] must be closed and all -** other SQLite resources must be deallocated prior to invoking -** sqlite3_shutdown(). -** -** Among other things, ^sqlite3_initialize() will invoke -** sqlite3_os_init(). Similarly, ^sqlite3_shutdown() -** will invoke sqlite3_os_end(). -** -** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success. -** ^If for some reason, sqlite3_initialize() is unable to initialize -** the library (perhaps it is unable to allocate a needed resource such -** as a mutex) it returns an [error code] other than [SQLITE_OK]. -** -** ^The sqlite3_initialize() routine is called internally by many other -** SQLite interfaces so that an application usually does not need to -** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] -** calls sqlite3_initialize() so the SQLite library will be automatically -** initialized when [sqlite3_open()] is called if it has not be initialized -** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] -** compile-time option, then the automatic calls to sqlite3_initialize() -** are omitted and the application must call sqlite3_initialize() directly -** prior to using any other SQLite interface. For maximum portability, -** it is recommended that applications always invoke sqlite3_initialize() -** directly prior to using any other SQLite interface. Future releases -** of SQLite may require this. 
In other words, the behavior exhibited -** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the -** default behavior in some future release of SQLite. -** -** The sqlite3_os_init() routine does operating-system specific -** initialization of the SQLite library. The sqlite3_os_end() -** routine undoes the effect of sqlite3_os_init(). Typical tasks -** performed by these routines include allocation or deallocation -** of static resources, initialization of global variables, -** setting up a default [sqlite3_vfs] module, or setting up -** a default configuration using [sqlite3_config()]. -** -** The application should never invoke either sqlite3_os_init() -** or sqlite3_os_end() directly. The application should only invoke -** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() -** interface is called automatically by sqlite3_initialize() and -** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate -** implementations for sqlite3_os_init() and sqlite3_os_end() -** are built into SQLite when it is compiled for Unix, Windows, or OS/2. -** When [custom builds | built for other platforms] -** (using the [SQLITE_OS_OTHER=1] compile-time -** option) the application must supply a suitable implementation for -** sqlite3_os_init() and sqlite3_os_end(). An application-supplied -** implementation of sqlite3_os_init() or sqlite3_os_end() -** must return [SQLITE_OK] on success and some other [error code] upon -** failure. -*/ -SQLITE_API int sqlite3_initialize(void); -SQLITE_API int sqlite3_shutdown(void); -SQLITE_API int sqlite3_os_init(void); -SQLITE_API int sqlite3_os_end(void); - -/* -** CAPI3REF: Configuring The SQLite Library -** -** The sqlite3_config() interface is used to make global configuration -** changes to SQLite in order to tune SQLite to the specific needs of -** the application. The default configuration is recommended for most -** applications and so this routine is usually not necessary. It is -** provided to support rare applications with unusual needs. -** -** The sqlite3_config() interface is not threadsafe. The application -** must ensure that no other SQLite interfaces are invoked by other -** threads while sqlite3_config() is running. -** -** The sqlite3_config() interface -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. -** -** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. -** ^If the option is unknown or SQLite is unable to set the option -** then this routine returns a non-zero [error code]. -*/ -SQLITE_API int sqlite3_config(int, ...); - -/* -** CAPI3REF: Configure database connections -** METHOD: sqlite3 -** -** The sqlite3_db_config() interface is used to make configuration -** changes to a [database connection]. The interface is similar to -** [sqlite3_config()] except that the changes apply to a single -** [database connection] (specified in the first argument). 
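A minimal sketch of the explicit library lifecycle described above, relevant mainly to builds with SQLITE_OMIT_AUTOINIT; any global sqlite3_config() call has to happen before sqlite3_initialize() (error handling kept deliberately small):

    #include <sqlite3.h>

    int main(void) {
      /* Global configuration is only legal before initialization. */
      if (sqlite3_config(SQLITE_CONFIG_MULTITHREAD) != SQLITE_OK) {
        /* e.g. returns SQLITE_ERROR on a SQLITE_THREADSAFE=0 build */
      }
      if (sqlite3_initialize() != SQLITE_OK) return 1;

      /* ... open connections and do real work here ... */

      sqlite3_shutdown();   /* all connections must be closed first */
      return 0;
    }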
-** -** The second argument to sqlite3_db_config(D,V,...) is the -** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code -** that indicates what aspect of the [database connection] is being configured. -** Subsequent arguments vary depending on the configuration verb. -** -** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if -** the call is considered successful. -*/ -SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Memory Allocation Routines -** -** An instance of this object defines the interface between SQLite -** and low-level memory allocation routines. -** -** This object is used in only one place in the SQLite interface. -** A pointer to an instance of this object is the argument to -** [sqlite3_config()] when the configuration option is -** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. -** By creating an instance of this object -** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) -** during configuration, an application can specify an alternative -** memory allocation subsystem for SQLite to use for all of its -** dynamic memory needs. -** -** Note that SQLite comes with several [built-in memory allocators] -** that are perfectly adequate for the overwhelming majority of applications -** and that this object is only useful to a tiny minority of applications -** with specialized memory allocation requirements. This object is -** also used during testing of SQLite in order to specify an alternative -** memory allocator that simulates memory out-of-memory conditions in -** order to verify that SQLite recovers gracefully from such -** conditions. -** -** The xMalloc, xRealloc, and xFree methods must work like the -** malloc(), realloc() and free() functions from the standard C library. -** ^SQLite guarantees that the second argument to -** xRealloc is always a value returned by a prior call to xRoundup. -** -** xSize should return the allocated size of a memory allocation -** previously obtained from xMalloc or xRealloc. The allocated size -** is always at least as big as the requested size but may be larger. -** -** The xRoundup method returns what would be the allocated size of -** a memory allocation given a particular requested size. Most memory -** allocators round up memory allocations at least to the next multiple -** of 8. Some allocators round up to a larger multiple or to a power of 2. -** Every memory allocation request coming in through [sqlite3_malloc()] -** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, -** that causes the corresponding memory allocation to fail. -** -** The xInit method initializes the memory allocator. For example, -** it might allocate any require mutexes or initialize internal data -** structures. The xShutdown method is invoked (indirectly) by -** [sqlite3_shutdown()] and should deallocate any resources acquired -** by xInit. The pAppData pointer is used as the only parameter to -** xInit and xShutdown. -** -** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes -** the xInit method, so the xInit method need not be threadsafe. The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. For all other methods, SQLite -** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the -** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which -** it is by default) and so the methods are automatically serialized. 
-** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other -** methods must be threadsafe or else make their own arrangements for -** serialization. -** -** SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). -*/ -typedef struct sqlite3_mem_methods sqlite3_mem_methods; -struct sqlite3_mem_methods { - void *(*xMalloc)(int); /* Memory allocation function */ - void (*xFree)(void*); /* Free a prior allocation */ - void *(*xRealloc)(void*,int); /* Resize an allocation */ - int (*xSize)(void*); /* Return the size of an allocation */ - int (*xRoundup)(int); /* Round up request size to allocation size */ - int (*xInit)(void*); /* Initialize the memory allocator */ - void (*xShutdown)(void*); /* Deinitialize the memory allocator */ - void *pAppData; /* Argument to xInit() and xShutdown() */ -}; - -/* -** CAPI3REF: Configuration Options -** KEYWORDS: {configuration option} -** -** These constants are the available integer configuration options that -** can be passed as the first argument to the [sqlite3_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_config()] to make sure that -** the call worked. The [sqlite3_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
-** [[SQLITE_CONFIG_SINGLETHREAD]]
SQLITE_CONFIG_SINGLETHREAD
-**
There are no arguments to this option. ^This option sets the -** [threading mode] to Single-thread. In other words, it disables -** all mutexing and puts SQLite into a mode where it can only be used -** by a single thread. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to change the [threading mode] from its default -** value of Single-thread and so [sqlite3_config()] will return -** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD -** configuration option.
-** -** [[SQLITE_CONFIG_MULTITHREAD]]
SQLITE_CONFIG_MULTITHREAD
-**
There are no arguments to this option. ^This option sets the -** [threading mode] to Multi-thread. In other words, it disables -** mutexing on [database connection] and [prepared statement] objects. -** The application is responsible for serializing access to -** [database connections] and [prepared statements]. But other mutexes -** are enabled so that SQLite will be safe to use in a multi-threaded -** environment as long as no two threads attempt to use the same -** [database connection] at the same time. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to set the Multi-thread [threading mode] and -** [sqlite3_config()] will return [SQLITE_ERROR] if called with the -** SQLITE_CONFIG_MULTITHREAD configuration option.
-** -** [[SQLITE_CONFIG_SERIALIZED]]
SQLITE_CONFIG_SERIALIZED
-**
There are no arguments to this option. ^This option sets the -** [threading mode] to Serialized. In other words, this option enables -** all mutexes including the recursive -** mutexes on [database connection] and [prepared statement] objects. -** In this mode (which is the default when SQLite is compiled with -** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access -** to [database connections] and [prepared statements] so that the -** application is free to use the same [database connection] or the -** same [prepared statement] in different threads at the same time. -** ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to set the Serialized [threading mode] and -** [sqlite3_config()] will return [SQLITE_ERROR] if called with the -** SQLITE_CONFIG_SERIALIZED configuration option.
-** -** [[SQLITE_CONFIG_MALLOC]]
SQLITE_CONFIG_MALLOC
-**
^(The SQLITE_CONFIG_MALLOC option takes a single argument which is -** a pointer to an instance of the [sqlite3_mem_methods] structure. -** The argument specifies -** alternative low-level memory allocation routines to be used in place of -** the memory allocation routines built into SQLite.)^ ^SQLite makes -** its own private copy of the content of the [sqlite3_mem_methods] structure -** before the [sqlite3_config()] call returns.
-** -** [[SQLITE_CONFIG_GETMALLOC]]
SQLITE_CONFIG_GETMALLOC
-**
^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which -** is a pointer to an instance of the [sqlite3_mem_methods] structure. -** The [sqlite3_mem_methods] -** structure is filled with the currently defined memory allocation routines.)^ -** This option can be used to overload the default memory allocation -** routines with a wrapper that simulates memory allocation failure or -** tracks memory usage, for example.
-** -** [[SQLITE_CONFIG_MEMSTATUS]]
SQLITE_CONFIG_MEMSTATUS
-**
^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int, -** interpreted as a boolean, which enables or disables the collection of -** memory allocation statistics. ^(When memory allocation statistics are -** disabled, the following SQLite interfaces become non-operational: -**
    -**
  • [sqlite3_memory_used()] -**
  • [sqlite3_memory_highwater()] -**
  • [sqlite3_soft_heap_limit64()] -**
  • [sqlite3_status64()] -**
)^ -** ^Memory allocation statistics are enabled by default unless SQLite is -** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory -** allocation statistics are disabled by default. -**
-** -** [[SQLITE_CONFIG_SCRATCH]]
SQLITE_CONFIG_SCRATCH
-**
^The SQLITE_CONFIG_SCRATCH option specifies a static memory buffer -** that SQLite can use for scratch memory. ^(There are three arguments -** to SQLITE_CONFIG_SCRATCH: A pointer to an 8-byte -** aligned memory buffer from which the scratch allocations will be -** drawn, the size of each scratch allocation (sz), -** and the maximum number of scratch allocations (N).)^ -** The first argument must be a pointer to an 8-byte aligned buffer -** of at least sz*N bytes of memory. -** ^SQLite will not use more than one scratch buffer per thread. -** ^SQLite will never request a scratch buffer that is more than 6 -** times the database page size. -** ^If SQLite needs additional -** scratch memory beyond what is provided by this configuration option, then -** [sqlite3_malloc()] will be used to obtain the memory needed.

-** ^When the application provides any amount of scratch memory using -** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large -** [sqlite3_malloc|heap allocations]. -** This can help [Robson proof|prevent memory allocation failures] due to heap -** fragmentation in low-memory embedded systems. -**

-** -** [[SQLITE_CONFIG_PAGECACHE]]
SQLITE_CONFIG_PAGECACHE
-**
^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool -** that SQLite can use for the database page cache with the default page -** cache implementation. -** This configuration option is a no-op if an application-defined page -** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2]. -** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to -** 8-byte aligned memory (pMem), the size of each page cache line (sz), -** and the number of cache lines (N). -** The sz argument should be the size of the largest database page -** (a power of two between 512 and 65536) plus some extra bytes for each -** page header. ^The number of extra bytes needed by the page header -** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ]. -** ^It is harmless, apart from the wasted memory, -** for the sz parameter to be larger than necessary. The pMem -** argument must be either a NULL pointer or a pointer to an 8-byte -** aligned block of memory of at least sz*N bytes, otherwise -** subsequent behavior is undefined. -** ^When pMem is not NULL, SQLite will strive to use the memory provided -** to satisfy page cache needs, falling back to [sqlite3_malloc()] if -** a page cache line is larger than sz bytes or if all of the pMem buffer -** is exhausted. -** ^If pMem is NULL and N is non-zero, then each database connection -** does an initial bulk allocation for page cache memory -** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or -** of -1024*N bytes if N is negative. ^If additional -** page cache memory is needed beyond what is provided by the initial -** allocation, then SQLite goes to [sqlite3_malloc()] separately for each -** additional cache line.
-** -** [[SQLITE_CONFIG_HEAP]]
SQLITE_CONFIG_HEAP
-**
^The SQLITE_CONFIG_HEAP option specifies a static memory buffer -** that SQLite will use for all of its dynamic memory allocation needs -** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and -** [SQLITE_CONFIG_PAGECACHE]. -** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled -** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns -** [SQLITE_ERROR] if invoked otherwise. -** ^There are three arguments to SQLITE_CONFIG_HEAP: -** An 8-byte aligned pointer to the memory, -** the number of bytes in the memory buffer, and the minimum allocation size. -** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts -** to using its default memory allocator (the system malloc() implementation), -** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the -** memory pointer is not NULL then the alternative memory -** allocator is engaged to handle all of SQLite's memory allocation needs. -** The first pointer (the memory pointer) must be aligned to an 8-byte -** boundary or subsequent behavior of SQLite will be undefined. -** The minimum allocation size is capped at 2**12. Reasonable values -** for the minimum allocation size are 2**5 through 2**8.
-** -** [[SQLITE_CONFIG_MUTEX]]
SQLITE_CONFIG_MUTEX
-**
^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a -** pointer to an instance of the [sqlite3_mutex_methods] structure. -** The argument specifies alternative low-level mutex routines to be used -** in place the mutex routines built into SQLite.)^ ^SQLite makes a copy of -** the content of the [sqlite3_mutex_methods] structure before the call to -** [sqlite3_config()] returns. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** the entire mutexing subsystem is omitted from the build and hence calls to -** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will -** return [SQLITE_ERROR].
-** -** [[SQLITE_CONFIG_GETMUTEX]]
SQLITE_CONFIG_GETMUTEX
-**
^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which -** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The -** [sqlite3_mutex_methods] -** structure is filled with the currently defined mutex routines.)^ -** This option can be used to overload the default mutex allocation -** routines with a wrapper used to track mutex usage for performance -** profiling or testing, for example. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** the entire mutexing subsystem is omitted from the build and hence calls to -** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will -** return [SQLITE_ERROR].
-** -** [[SQLITE_CONFIG_LOOKASIDE]]
SQLITE_CONFIG_LOOKASIDE
-**
^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine -** the default size of lookaside memory on each [database connection]. -** The first argument is the -** size of each lookaside buffer slot and the second is the number of -** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE -** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] -** option to [sqlite3_db_config()] can be used to change the lookaside -** configuration on individual connections.)^
-** -** [[SQLITE_CONFIG_PCACHE2]]
SQLITE_CONFIG_PCACHE2
-**
^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is -** a pointer to an [sqlite3_pcache_methods2] object. This object specifies -** the interface to a custom page cache implementation.)^ -** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.
-** -** [[SQLITE_CONFIG_GETPCACHE2]]
SQLITE_CONFIG_GETPCACHE2
-**
^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which -** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies -** the current page cache implementation into that object.)^
-** -** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
-**
The SQLITE_CONFIG_LOG option is used to configure the SQLite -** global [error log]. -** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a -** function with a call signature of void(*)(void*,int,const char*), -** and a pointer to void. ^If the function pointer is not NULL, it is -** invoked by [sqlite3_log()] to process each logging event. ^If the -** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. -** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is -** passed through as the first parameter to the application-defined logger -** function whenever that function is invoked. ^The second parameter to -** the logger function is a copy of the first parameter to the corresponding -** [sqlite3_log()] call and is intended to be a [result code] or an -** [extended result code]. ^The third parameter passed to the logger is -** the log message after formatting via [sqlite3_snprintf()]. -** The SQLite logging interface is not reentrant; the logger function -** supplied by the application must not invoke any SQLite interface. -** In a multi-threaded application, the application-defined logger -** function must be threadsafe.
-** -** [[SQLITE_CONFIG_URI]]
SQLITE_CONFIG_URI -**
^(The SQLITE_CONFIG_URI option takes a single argument of type int. -** If non-zero, then URI handling is globally enabled. If the parameter is zero, -** then URI handling is globally disabled.)^ ^If URI handling is globally -** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()], -** [sqlite3_open16()] or -** specified as part of [ATTACH] commands are interpreted as URIs, regardless -** of whether or not the [SQLITE_OPEN_URI] flag is set when the database -** connection is opened. ^If it is globally disabled, filenames are -** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the -** database connection is opened. ^(By default, URI handling is globally -** disabled. The default value may be changed by compiling with the -** [SQLITE_USE_URI] symbol defined.)^ -** -** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]]
SQLITE_CONFIG_COVERING_INDEX_SCAN -**
^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer -** argument which is interpreted as a boolean in order to enable or disable -** the use of covering indices for full table scans in the query optimizer. -** ^The default setting is determined -** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on" -** if that compile-time option is omitted. -** The ability to disable the use of covering indices for full table scans -** is provided because some incorrectly coded legacy applications might malfunction -** when the optimization is enabled. Providing the ability to -** disable the optimization allows the older, buggy application code to work -** without change even with newer versions of SQLite.
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE -**
These options are obsolete and should not be used by new code. -** They are retained for backwards compatibility but are now no-ops. -**
-** -** [[SQLITE_CONFIG_SQLLOG]] -**
SQLITE_CONFIG_SQLLOG -**
This option is only available if SQLite is compiled with the -** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should -** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int). -** The second should be of type (void*). The callback is invoked by the library -** in three separate circumstances, identified by the value passed as the -** fourth parameter. If the fourth parameter is 0, then the database connection -** passed as the second argument has just been opened. The third argument -** points to a buffer containing the name of the main database file. If the -** fourth parameter is 1, then the SQL statement that the third parameter -** points to has just been executed. Or, if the fourth parameter is 2, then -** the connection being passed as the second parameter is being closed. The -** third parameter is passed NULL in this case. An example of using this -** configuration option can be seen in the "test_sqllog.c" source file in -** the canonical SQLite source tree.
-** -** [[SQLITE_CONFIG_MMAP_SIZE]] -**
SQLITE_CONFIG_MMAP_SIZE -**
^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values -** that are the default mmap size limit (the default setting for -** [PRAGMA mmap_size]) and the maximum allowed mmap size limit. -** ^The default setting can be overridden by each database connection using -** either the [PRAGMA mmap_size] command, or by using the -** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size -** will be silently truncated if necessary so that it does not exceed the -** compile-time maximum mmap size set by the -** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^ -** ^If either argument to this option is negative, then that argument is -** changed to its compile-time default. -** -** [[SQLITE_CONFIG_WIN32_HEAPSIZE]] -**
SQLITE_CONFIG_WIN32_HEAPSIZE -**
^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is -** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro -** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value -** that specifies the maximum size of the created heap. -** -** [[SQLITE_CONFIG_PCACHE_HDRSZ]] -**
SQLITE_CONFIG_PCACHE_HDRSZ -**
^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which -** is a pointer to an integer and writes into that integer the number of extra -** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE]. -** The amount of extra space required can change depending on the compiler, -** target platform, and SQLite version. -** -** [[SQLITE_CONFIG_PMASZ]] -**
SQLITE_CONFIG_PMASZ -**
^The SQLITE_CONFIG_PMASZ option takes a single parameter which -** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded -** sorter to that integer. The default minimum PMA Size is set by the -** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched -** to help with sort operations when multithreaded sorting -** is enabled (using the [PRAGMA threads] command) and the amount of content -** to be sorted exceeds the page size times the minimum of the -** [PRAGMA cache_size] setting and this value. -** -** [[SQLITE_CONFIG_STMTJRNL_SPILL]] -**
SQLITE_CONFIG_STMTJRNL_SPILL -**
^The SQLITE_CONFIG_STMTJRNL_SPILL option takes a single parameter which -** becomes the [statement journal] spill-to-disk threshold. -** [Statement journals] are held in memory until their size (in bytes) -** exceeds this threshold, at which point they are written to disk. -** Or if the threshold is -1, statement journals are always held -** exclusively in memory. -** Since many statement journals never become large, setting the spill -** threshold to a value such as 64KiB can greatly reduce the amount of -** I/O required to support statement rollback. -** The default value for this setting is controlled by the -** [SQLITE_STMTJRNL_SPILL] compile-time option. -**
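As one concrete use of the SQLITE_CONFIG_* options above, a minimal sketch that installs a global error-log callback via SQLITE_CONFIG_LOG before the library is initialized; the callback name and the use of stderr are only illustrative:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Matches the void(*)(void*,int,const char*) shape expected by
    ** SQLITE_CONFIG_LOG; it must not call back into SQLite. */
    static void log_callback(void *pArg, int iErrCode, const char *zMsg) {
      (void)pArg;
      fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
    }

    int main(void) {
      sqlite3_config(SQLITE_CONFIG_LOG, log_callback, (void*)0);
      sqlite3_initialize();
      /* ... errors and warnings are now reported through log_callback ... */
      sqlite3_shutdown();
      return 0;
    }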
-*/ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ -#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ -#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ -#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ -#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */ - -/* -** CAPI3REF: Database Connection Configuration Options -** -** These constants are the available integer configuration options that -** can be passed as the second argument to the [sqlite3_db_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_db_config()] to make sure that -** the call worked. ^The [sqlite3_db_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
-**
SQLITE_DBCONFIG_LOOKASIDE
-**
^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. -** ^The first argument (the third parameter to [sqlite3_db_config()] is a -** pointer to a memory buffer to use for lookaside memory. -** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb -** may be NULL in which case SQLite will allocate the -** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the -** size of each lookaside buffer slot. ^The third argument is the number of -** slots. The size of the buffer in the first argument must be greater than -** or equal to the product of the second and third arguments. The buffer -** must be aligned to an 8-byte boundary. ^If the second argument to -** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally -** rounded down to the next smaller multiple of 8. ^(The lookaside memory -** configuration for a database connection can only be changed when that -** connection is not currently using lookaside memory, or in other words -** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. -** Any attempt to change the lookaside memory configuration when lookaside -** memory is in use leaves the configuration unchanged and returns -** [SQLITE_BUSY].)^
-** -**
SQLITE_DBCONFIG_ENABLE_FKEY
-**
^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. -** The first argument is an integer which is 0 to disable FK enforcement, -** positive to enable FK enforcement or negative to leave FK enforcement -** unchanged. The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether FK enforcement is off or on -** following this call. The second parameter may be a NULL pointer, in -** which case the FK enforcement setting is not reported back.
-** -**
SQLITE_DBCONFIG_ENABLE_TRIGGER
-**
^This option is used to enable or disable [CREATE TRIGGER | triggers]. -** There should be two additional arguments. -** The first argument is an integer which is 0 to disable triggers, -** positive to enable triggers or negative to leave the setting unchanged. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether triggers are disabled or enabled -** following this call. The second parameter may be a NULL pointer, in -** which case the trigger setting is not reported back.
-** -**
SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
-**
^This option is used to enable or disable the two-argument -** version of the [fts3_tokenizer()] function which is part of the -** [FTS3] full-text search engine extension. -** There should be two additional arguments. -** The first argument is an integer which is 0 to disable fts3_tokenizer() or -** positive to enable fts3_tokenizer() or negative to leave the setting -** unchanged. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled -** following this call. The second parameter may be a NULL pointer, in -** which case the new setting is not reported back.
-** -**
SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION
-**
^This option is used to enable or disable the [sqlite3_load_extension()] -** interface independently of the [load_extension()] SQL function. -** The [sqlite3_enable_load_extension()] API enables or disables both the -** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. -** There should be two additional arguments. -** When the first argument to this interface is 1, then only the C-API is -** enabled and the SQL function remains disabled. If the first argument to -** this interface is 0, then both the C-API and the SQL function are disabled. -** If the first argument is -1, then no changes are made to state of either the -** C-API or the SQL function. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface -** is disabled or enabled following this call. The second parameter may -** be a NULL pointer, in which case the new setting is not reported back. -**
-** -**
SQLITE_DBCONFIG_MAINDBNAME
-**
^This option is used to change the name of the "main" database -** schema. ^The sole argument is a pointer to a constant UTF8 string -** which will become the new schema name in place of "main". ^SQLite -** does not make a copy of the new main schema name string, so the application -** must ensure that the argument passed into this DBCONFIG option is unchanged -** until after the database connection closes. -**
-** -**
SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
-**
Usually, when a database in wal mode is closed or detached from a -** database handle, SQLite checks if this will mean that there are now no -** connections at all to the database. If so, it performs a checkpoint -** operation before closing the connection. This option may be used to -** override this behaviour. The first parameter passed to this operation -** is an integer - non-zero to disable checkpoints-on-close, or zero (the -** default) to enable them. The second parameter is a pointer to an integer -** into which is written 0 or 1 to indicate whether checkpoints-on-close -** have been disabled - 0 if they are not disabled, 1 if they are. -**
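The SQLITE_DBCONFIG options above are all applied with sqlite3_db_config(). A minimal sketch, using a throwaway in-memory connection purely for illustration, that turns on foreign-key enforcement and reads back the resulting state:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void) {
      sqlite3 *db = 0;
      if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;

      int fkState = -1;
      /* 1 = enable enforcement; the int* argument receives the new setting. */
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &fkState);
      if (rc == SQLITE_OK) {
        printf("foreign keys are %s\n", fkState ? "on" : "off");
      }
      sqlite3_close(db);
      return 0;
    }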
-** -**
-*/ -#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ -#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ -#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ -#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ -#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ -#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ -#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */ - - -/* -** CAPI3REF: Enable Or Disable Extended Result Codes -** METHOD: sqlite3 -** -** ^The sqlite3_extended_result_codes() routine enables or disables the -** [extended result codes] feature of SQLite. ^The extended result -** codes are disabled by default for historical compatibility. -*/ -SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff); - -/* -** CAPI3REF: Last Insert Rowid -** METHOD: sqlite3 -** -** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables) -** has a unique 64-bit signed -** integer key called the [ROWID | "rowid"]. ^The rowid is always available -** as an undeclared column named ROWID, OID, or _ROWID_ as long as those -** names are not also used by explicitly declared columns. ^If -** the table has a column of type [INTEGER PRIMARY KEY] then that column -** is another alias for the rowid. -** -** ^The sqlite3_last_insert_rowid(D) interface returns the [rowid] of the -** most recent successful [INSERT] into a rowid table or [virtual table] -** on database connection D. -** ^Inserts into [WITHOUT ROWID] tables are not recorded. -** ^If no successful [INSERT]s into rowid tables -** have ever occurred on the database connection D, -** then sqlite3_last_insert_rowid(D) returns zero. -** -** ^(If an [INSERT] occurs within a trigger or within a [virtual table] -** method, then this routine will return the [rowid] of the inserted -** row as long as the trigger or virtual table method is running. -** But once the trigger or virtual table method ends, the value returned -** by this routine reverts to what it was before the trigger or virtual -** table method began.)^ -** -** ^An [INSERT] that fails due to a constraint violation is not a -** successful [INSERT] and does not change the value returned by this -** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, -** and INSERT OR ABORT make no changes to the return value of this -** routine when their insertion fails. ^(When INSERT OR REPLACE -** encounters a constraint violation, it does not fail. The -** INSERT continues to completion after deleting rows that caused -** the constraint problem so INSERT OR REPLACE will always change -** the return value of this interface.)^ -** -** ^For the purposes of this routine, an [INSERT] is considered to -** be successful even if it is subsequently rolled back. -** -** This function is accessible to SQL statements via the -** [last_insert_rowid() SQL function]. -** -** If a separate thread performs a new [INSERT] on the same -** database connection while the [sqlite3_last_insert_rowid()] -** function is running and thus changes the last insert [rowid], -** then the value returned by [sqlite3_last_insert_rowid()] is -** unpredictable and might not equal either the old or the new -** last insert [rowid]. 
-*/ -SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); - -/* -** CAPI3REF: Count The Number Of Rows Modified -** METHOD: sqlite3 -** -** ^This function returns the number of rows modified, inserted or -** deleted by the most recently completed INSERT, UPDATE or DELETE -** statement on the database connection specified by the only parameter. -** ^Executing any other type of SQL statement does not modify the value -** returned by this function. -** -** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are -** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], -** [foreign key actions] or [REPLACE] constraint resolution are not counted. -** -** Changes to a view that are intercepted by -** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value -** returned by sqlite3_changes() immediately after an INSERT, UPDATE or -** DELETE statement run on a view is always zero. Only changes made to real -** tables are counted. -** -** Things are more complicated if the sqlite3_changes() function is -** executed while a trigger program is running. This may happen if the -** program uses the [changes() SQL function], or if some other callback -** function invokes sqlite3_changes() directly. Essentially: -** -**
    -**
  • ^(Before entering a trigger program the value returned by -** sqlite3_changes() function is saved. After the trigger program -** has finished, the original value is restored.)^ -** -**
  • ^(Within a trigger program each INSERT, UPDATE and DELETE -** statement sets the value returned by sqlite3_changes() -** upon completion as normal. Of course, this value will not include -** any changes performed by sub-triggers, as the sqlite3_changes() -** value will be saved and restored after each sub-trigger has run.)^ -**
-** -** ^This means that if the changes() SQL function (or similar) is used -** by the first INSERT, UPDATE or DELETE statement within a trigger, it -** returns the value as set when the calling statement began executing. -** ^If it is used by the second or subsequent such statement within a trigger -** program, the value returned reflects the number of rows modified by the -** previous INSERT, UPDATE or DELETE statement within the same trigger. -** -** See also the [sqlite3_total_changes()] interface, the -** [count_changes pragma], and the [changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_changes()] is running then the value returned -** is unpredictable and not meaningful. -*/ -SQLITE_API int sqlite3_changes(sqlite3*); - -/* -** CAPI3REF: Total Number Of Rows Modified -** METHOD: sqlite3 -** -** ^This function returns the total number of rows inserted, modified or -** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed -** since the database connection was opened, including those executed as -** part of trigger programs. ^Executing any other type of SQL statement -** does not affect the value returned by sqlite3_total_changes(). -** -** ^Changes made as part of [foreign key actions] are included in the -** count, but those made as part of REPLACE constraint resolution are -** not. ^Changes to a view that are intercepted by INSTEAD OF triggers -** are not counted. -** -** See also the [sqlite3_changes()] interface, the -** [count_changes pragma], and the [total_changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_total_changes()] is running then the value -** returned is unpredictable and not meaningful. -*/ -SQLITE_API int sqlite3_total_changes(sqlite3*); - -/* -** CAPI3REF: Interrupt A Long-Running Query -** METHOD: sqlite3 -** -** ^This function causes any pending database operation to abort and -** return at its earliest opportunity. This routine is typically -** called in response to a user action such as pressing "Cancel" -** or Ctrl-C where the user wants a long query operation to halt -** immediately. -** -** ^It is safe to call this routine from a thread different from the -** thread that is currently running the database operation. But it -** is not safe to call this routine with a [database connection] that -** is closed or might close before sqlite3_interrupt() returns. -** -** ^If an SQL operation is very nearly finished at the time when -** sqlite3_interrupt() is called, then it might not have an opportunity -** to be interrupted and might continue to completion. -** -** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. -** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE -** that is inside an explicit transaction, then the entire transaction -** will be rolled back automatically. -** -** ^The sqlite3_interrupt(D) call is in effect until all currently running -** SQL statements on [database connection] D complete. ^Any new SQL statements -** that are started after the sqlite3_interrupt() call and before the -** running statements reaches zero are interrupted as if they had been -** running prior to the sqlite3_interrupt() call. ^New SQL statements -** that are started after the running statement count reaches zero are -** not effected by the sqlite3_interrupt(). 
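A short sketch of how the two row-accounting interfaces above are typically read back right after a statement runs; it assumes an open connection db and a hypothetical notes(id INTEGER PRIMARY KEY, body TEXT) table:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Insert a row, then report the rowid it received and how many rows
    ** the statement changed, per sqlite3_last_insert_rowid() and
    ** sqlite3_changes() above. */
    static void insert_and_report(sqlite3 *db) {
      char *errmsg = NULL;
      if (sqlite3_exec(db, "INSERT INTO notes(body) VALUES('hello')",
                       0, 0, &errmsg) != SQLITE_OK) {
        fprintf(stderr, "insert failed: %s\n", errmsg);
        sqlite3_free(errmsg);
        return;
      }
      printf("new rowid = %lld, rows changed = %d\n",
             (long long)sqlite3_last_insert_rowid(db), sqlite3_changes(db));
    }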
-** ^A call to sqlite3_interrupt(D) that occurs when there are no running -** SQL statements is a no-op and has no effect on SQL statements -** that are started after the sqlite3_interrupt() call returns. -** -** If the database connection closes while [sqlite3_interrupt()] -** is running then bad things will likely happen. -*/ -SQLITE_API void sqlite3_interrupt(sqlite3*); - -/* -** CAPI3REF: Determine If An SQL Statement Is Complete -** -** These routines are useful during command-line input to determine if the -** currently entered text seems to form a complete SQL statement or -** if additional input is needed before sending the text into -** SQLite for parsing. ^These routines return 1 if the input string -** appears to be a complete SQL statement. ^A statement is judged to be -** complete if it ends with a semicolon token and is not a prefix of a -** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within -** string literals or quoted identifier names or comments are not -** independent tokens (they are part of the token in which they are -** embedded) and thus do not count as a statement terminator. ^Whitespace -** and comments that follow the final semicolon are ignored. -** -** ^These routines return 0 if the statement is incomplete. ^If a -** memory allocation fails, then SQLITE_NOMEM is returned. -** -** ^These routines do not parse the SQL statements thus -** will not detect syntactically incorrect SQL. -** -** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior -** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked -** automatically by sqlite3_complete16(). If that initialization fails, -** then the return value from sqlite3_complete16() will be non-zero -** regardless of whether or not the input SQL is complete.)^ -** -** The input to [sqlite3_complete()] must be a zero-terminated -** UTF-8 string. -** -** The input to [sqlite3_complete16()] must be a zero-terminated -** UTF-16 string in native byte order. -*/ -SQLITE_API int sqlite3_complete(const char *sql); -SQLITE_API int sqlite3_complete16(const void *sql); - -/* -** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors -** KEYWORDS: {busy-handler callback} {busy handler} -** METHOD: sqlite3 -** -** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X -** that might be invoked with argument P whenever -** an attempt is made to access a database table associated with -** [database connection] D when another thread -** or process has the table locked. -** The sqlite3_busy_handler() interface is used to implement -** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout]. -** -** ^If the busy callback is NULL, then [SQLITE_BUSY] -** is returned immediately upon encountering the lock. ^If the busy callback -** is not NULL, then the callback might be invoked with two arguments. -** -** ^The first argument to the busy handler is a copy of the void* pointer which -** is the third argument to sqlite3_busy_handler(). ^The second argument to -** the busy handler callback is the number of times that the busy handler has -** been invoked previously for the same locking event. ^If the -** busy callback returns 0, then no additional attempts are made to -** access the database and [SQLITE_BUSY] is returned -** to the application. -** ^If the callback returns non-zero, then another attempt -** is made to access the database and the cycle repeats. -** -** The presence of a busy handler does not guarantee that it will be invoked -** when there is lock contention. 
^If SQLite determines that invoking the busy -** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] -** to the application instead of invoking the -** busy handler. -** Consider a scenario where one process is holding a read lock that -** it is trying to promote to a reserved lock and -** a second process is holding a reserved lock that it is trying -** to promote to an exclusive lock. The first process cannot proceed -** because it is blocked by the second and the second process cannot -** proceed because it is blocked by the first. If both processes -** invoke the busy handlers, neither will make any progress. Therefore, -** SQLite returns [SQLITE_BUSY] for the first process, hoping that this -** will induce the first process to release its read lock and allow -** the second process to proceed. -** -** ^The default busy callback is NULL. -** -** ^(There can only be a single busy handler defined for each -** [database connection]. Setting a new busy handler clears any -** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] -** or evaluating [PRAGMA busy_timeout=N] will change the -** busy handler and thus clear any previously set busy handler. -** -** The busy callback should not take any actions which modify the -** database connection that invoked the busy handler. In other words, -** the busy handler is not reentrant. Any such actions -** result in undefined behavior. -** -** A busy handler must not close the database connection -** or [prepared statement] that invoked the busy handler. -*/ -SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); - -/* -** CAPI3REF: Set A Busy Timeout -** METHOD: sqlite3 -** -** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps -** for a specified amount of time when a table is locked. ^The handler -** will sleep multiple times until at least "ms" milliseconds of sleeping -** have accumulated. ^After at least "ms" milliseconds of sleeping, -** the handler returns 0 which causes [sqlite3_step()] to return -** [SQLITE_BUSY]. -** -** ^Calling this routine with an argument less than or equal to zero -** turns off all busy handlers. -** -** ^(There can only be a single busy handler for a particular -** [database connection] at any given moment. If another busy handler -** was defined (using [sqlite3_busy_handler()]) prior to calling -** this routine, that other busy handler is cleared.)^ -** -** See also: [PRAGMA busy_timeout] -*/ -SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); - -/* -** CAPI3REF: Convenience Routines For Running Queries -** METHOD: sqlite3 -** -** This is a legacy interface that is preserved for backwards compatibility. -** Use of this interface is not recommended. -** -** Definition: A result table is memory data structure created by the -** [sqlite3_get_table()] interface. A result table records the -** complete query results from one or more queries. -** -** The table conceptually has a number of rows and columns. But -** these numbers are not part of the result table itself. These -** numbers are obtained separately. Let N be the number of rows -** and M be the number of columns. -** -** A result table is an array of pointers to zero-terminated UTF-8 strings. -** There are (N+1)*M elements in the array. The first M pointers point -** to zero-terminated strings that contain the names of the columns. -** The remaining entries all point to query results. NULL values result -** in NULL pointers. 
All other values are in their UTF-8 zero-terminated -** string representation as returned by [sqlite3_column_text()]. -** -** A result table might consist of one or more memory allocations. -** It is not safe to pass a result table directly to [sqlite3_free()]. -** A result table should be deallocated using [sqlite3_free_table()]. -** -** ^(As an example of the result table format, suppose a query result -** is as follows: -** -**
-**        Name        | Age
-**        -----------------------
-**        Alice       | 43
-**        Bob         | 28
-**        Cindy       | 21
-** 
-**
-** There are two columns (M==2) and three rows (N==3). Thus the
-** result table has 8 entries. Suppose the result table is stored
-** in an array named azResult. Then azResult holds this content:
-**
-**        azResult[0] = "Name";
-**        azResult[1] = "Age";
-**        azResult[2] = "Alice";
-**        azResult[3] = "43";
-**        azResult[4] = "Bob";
-**        azResult[5] = "28";
-**        azResult[6] = "Cindy";
-**        azResult[7] = "21";
-** 
)^ -** -** ^The sqlite3_get_table() function evaluates one or more -** semicolon-separated SQL statements in the zero-terminated UTF-8 -** string of its 2nd parameter and returns a result table to the -** pointer given in its 3rd parameter. -** -** After the application has finished with the result from sqlite3_get_table(), -** it must pass the result table pointer to sqlite3_free_table() in order to -** release the memory that was malloced. Because of the way the -** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling -** function must not try to call [sqlite3_free()] directly. Only -** [sqlite3_free_table()] is able to release the memory properly and safely. -** -** The sqlite3_get_table() interface is implemented as a wrapper around -** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access -** to any internal data structures of SQLite. It uses only the public -** interface defined here. As a consequence, errors that occur in the -** wrapper layer outside of the internal [sqlite3_exec()] call are not -** reflected in subsequent calls to [sqlite3_errcode()] or -** [sqlite3_errmsg()]. -*/ -SQLITE_API int sqlite3_get_table( - sqlite3 *db, /* An open database */ - const char *zSql, /* SQL to be evaluated */ - char ***pazResult, /* Results of the query */ - int *pnRow, /* Number of result rows written here */ - int *pnColumn, /* Number of result columns written here */ - char **pzErrmsg /* Error msg written here */ -); -SQLITE_API void sqlite3_free_table(char **result); - -/* -** CAPI3REF: Formatted String Printing Functions -** -** These routines are work-alikes of the "printf()" family of functions -** from the standard C library. -** These routines understand most of the common K&R formatting options, -** plus some additional non-standard formats, detailed below. -** Note that some of the more obscure formatting options from recent -** C-library standards are omitted from this implementation. -** -** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite3_malloc()]. -** The strings returned by these two routines should be -** released by [sqlite3_free()]. ^Both routines return a -** NULL pointer if [sqlite3_malloc()] is unable to allocate enough -** memory to hold the resulting string. -** -** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from -** the standard C library. The result is written into the -** buffer supplied as the second parameter whose size is given by -** the first parameter. Note that the order of the -** first two parameters is reversed from snprintf().)^ This is an -** historical accident that cannot be fixed without breaking -** backwards compatibility. ^(Note also that sqlite3_snprintf() -** returns a pointer to its buffer instead of the number of -** characters actually written into the buffer.)^ We admit that -** the number of characters written would be a more useful return -** value but we cannot change the implementation of sqlite3_snprintf() -** now without breaking compatibility. -** -** ^As long as the buffer size is greater than zero, sqlite3_snprintf() -** guarantees that the buffer is always zero-terminated. ^The first -** parameter "n" is the total size of the buffer, including space for -** the zero terminator. So the longest string that can be completely -** written will be n-1 characters. -** -** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). 
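As a usage sketch of the legacy sqlite3_get_table()/sqlite3_free_table() pair documented above (the people table is an assumption for illustration):

    #include <sqlite3.h>
    #include <stdio.h>

    /* Run a query through the legacy wrapper and walk the (N+1)*M string
    ** array it returns; row 0 holds the column names.  The table must be
    ** released with sqlite3_free_table(), never sqlite3_free(). */
    static void dump_people(sqlite3 *db) {
      char **azResult = NULL;
      char *errmsg = NULL;
      int nRow = 0, nCol = 0;
      if (sqlite3_get_table(db, "SELECT name, age FROM people",
                            &azResult, &nRow, &nCol, &errmsg) != SQLITE_OK) {
        fprintf(stderr, "query failed: %s\n", errmsg);
        sqlite3_free(errmsg);
        return;
      }
      for (int i = 1; i <= nRow; i++) {
        for (int j = 0; j < nCol; j++) {
          const char *v = azResult[i * nCol + j];
          printf("%s=%s  ", azResult[j], v ? v : "NULL");
        }
        printf("\n");
      }
      sqlite3_free_table(azResult);
    }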
-**
-** These routines all implement some additional formatting
-** options that are useful for constructing SQL statements.
-** All of the usual printf() formatting options apply. In addition, there
-** are "%q", "%Q", "%w" and "%z" options.
-**
-** ^(The %q option works like %s in that it substitutes a nul-terminated
-** string from the argument list. But %q also doubles every '\'' character.
-** %q is designed for use inside a string literal.)^ By doubling each '\''
-** character it escapes that character and allows it to be inserted into
-** the string.
-**
-** For example, assume the string variable zText contains text as follows:
-**
-**  char *zText = "It's a happy day!";
-** 
-** -** One can use this text in an SQL statement as follows: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
-**  sqlite3_exec(db, zSQL, 0, 0, 0);
-**  sqlite3_free(zSQL);
-** 
-** -** Because the %q format string is used, the '\'' character in zText -** is escaped and the SQL generated is as follows: -** -**
-**  INSERT INTO table1 VALUES('It''s a happy day!')
-** 
-** -** This is correct. Had we used %s instead of %q, the generated SQL -** would have looked like this: -** -**
-**  INSERT INTO table1 VALUES('It's a happy day!');
-** 
-** -** This second example is an SQL syntax error. As a general rule you should -** always use %q instead of %s when inserting text into a string literal. -** -** ^(The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Additionally, if the parameter in the -** argument list is a NULL pointer, %Q substitutes the text "NULL" (without -** single quotes).)^ So, for example, one could say: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
-**  sqlite3_exec(db, zSQL, 0, 0, 0);
-**  sqlite3_free(zSQL);
-** 
-** -** The code above will render a correct SQL statement in the zSQL -** variable even if the zText variable is a NULL pointer. -** -** ^(The "%w" formatting option is like "%q" except that it expects to -** be contained within double-quotes instead of single quotes, and it -** escapes the double-quote character instead of the single-quote -** character.)^ The "%w" formatting option is intended for safely inserting -** table and column names into a constructed SQL statement. -** -** ^(The "%z" formatting option works like "%s" but with the -** addition that after the string has been read and copied into -** the result, [sqlite3_free()] is called on the input string.)^ -*/ -SQLITE_API char *sqlite3_mprintf(const char*,...); -SQLITE_API char *sqlite3_vmprintf(const char*, va_list); -SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...); -SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); - -/* -** CAPI3REF: Memory Allocation Subsystem -** -** The SQLite core uses these three routines for all of its own -** internal memory allocation needs. "Core" in the previous sentence -** does not include operating-system specific VFS implementation. The -** Windows VFS uses native malloc() and free() for some operations. -** -** ^The sqlite3_malloc() routine returns a pointer to a block -** of memory at least N bytes in length, where N is the parameter. -** ^If sqlite3_malloc() is unable to obtain sufficient free -** memory, it returns a NULL pointer. ^If the parameter N to -** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns -** a NULL pointer. -** -** ^The sqlite3_malloc64(N) routine works just like -** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead -** of a signed 32-bit integer. -** -** ^Calling sqlite3_free() with a pointer previously returned -** by sqlite3_malloc() or sqlite3_realloc() releases that memory so -** that it might be reused. ^The sqlite3_free() routine is -** a no-op if is called with a NULL pointer. Passing a NULL pointer -** to sqlite3_free() is harmless. After being freed, memory -** should neither be read nor written. Even reading previously freed -** memory might result in a segmentation fault or other severe error. -** Memory corruption, a segmentation fault, or other severe error -** might result if sqlite3_free() is called with a non-NULL pointer that -** was not obtained from sqlite3_malloc() or sqlite3_realloc(). -** -** ^The sqlite3_realloc(X,N) interface attempts to resize a -** prior memory allocation X to be at least N bytes. -** ^If the X parameter to sqlite3_realloc(X,N) -** is a NULL pointer then its behavior is identical to calling -** sqlite3_malloc(N). -** ^If the N parameter to sqlite3_realloc(X,N) is zero or -** negative then the behavior is exactly the same as calling -** sqlite3_free(X). -** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation -** of at least N bytes in size or NULL if insufficient memory is available. -** ^If M is the size of the prior allocation, then min(N,M) bytes -** of the prior allocation are copied into the beginning of buffer returned -** by sqlite3_realloc(X,N) and the prior allocation is freed. -** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the -** prior allocation is not freed. -** -** ^The sqlite3_realloc64(X,N) interfaces works the same as -** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead -** of a 32-bit signed integer. 
-** -** ^If X is a memory allocation previously obtained from sqlite3_malloc(), -** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then -** sqlite3_msize(X) returns the size of that memory allocation in bytes. -** ^The value returned by sqlite3_msize(X) might be larger than the number -** of bytes requested when X was allocated. ^If X is a NULL pointer then -** sqlite3_msize(X) returns zero. If X points to something that is not -** the beginning of memory allocation, or if it points to a formerly -** valid memory allocation that has now been freed, then the behavior -** of sqlite3_msize(X) is undefined and possibly harmful. -** -** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(), -** sqlite3_malloc64(), and sqlite3_realloc64() -** is always aligned to at least an 8 byte boundary, or to a -** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time -** option is used. -** -** In SQLite version 3.5.0 and 3.5.1, it was possible to define -** the SQLITE_OMIT_MEMORY_ALLOCATION which would cause the built-in -** implementation of these routines to be omitted. That capability -** is no longer provided. Only built-in memory allocators can be used. -** -** Prior to SQLite version 3.7.10, the Windows OS interface layer called -** the system malloc() and free() directly when converting -** filenames between the UTF-8 encoding used by SQLite -** and whatever filename encoding is used by the particular Windows -** installation. Memory allocation errors were detected, but -** they were reported back as [SQLITE_CANTOPEN] or -** [SQLITE_IOERR] rather than [SQLITE_NOMEM]. -** -** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()] -** must be either NULL or else pointers obtained from a prior -** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have -** not yet been released. -** -** The application must not read or write any part of -** a block of memory after it has been released using -** [sqlite3_free()] or [sqlite3_realloc()]. -*/ -SQLITE_API void *sqlite3_malloc(int); -SQLITE_API void *sqlite3_malloc64(sqlite3_uint64); -SQLITE_API void *sqlite3_realloc(void*, int); -SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64); -SQLITE_API void sqlite3_free(void*); -SQLITE_API sqlite3_uint64 sqlite3_msize(void*); - -/* -** CAPI3REF: Memory Allocator Statistics -** -** SQLite provides these two interfaces for reporting on the status -** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()] -** routines, which form the built-in memory allocation subsystem. -** -** ^The [sqlite3_memory_used()] routine returns the number of bytes -** of memory currently outstanding (malloced but not freed). -** ^The [sqlite3_memory_highwater()] routine returns the maximum -** value of [sqlite3_memory_used()] since the high-water mark -** was last reset. ^The values returned by [sqlite3_memory_used()] and -** [sqlite3_memory_highwater()] include any overhead -** added by SQLite in its implementation of [sqlite3_malloc()], -** but not overhead added by the any underlying system library -** routines that [sqlite3_malloc()] may call. -** -** ^The memory high-water mark is reset to the current value of -** [sqlite3_memory_used()] if and only if the parameter to -** [sqlite3_memory_highwater()] is true. ^The value returned -** by [sqlite3_memory_highwater(1)] is the high-water mark -** prior to the reset. 
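A small, illustrative sketch of the allocator routines declared above; note that after a failed sqlite3_realloc() the original block is still live and must be freed separately:

    #include <sqlite3.h>
    #include <stdio.h>
    #include <string.h>

    static void allocator_demo(void) {
      char *buf = sqlite3_malloc(32);
      if (buf == NULL) return;                 /* allocation failed */
      strcpy(buf, "hello");
      char *bigger = sqlite3_realloc(buf, 128);
      if (bigger == NULL) {                    /* buf is still valid here */
        sqlite3_free(buf);
        return;
      }
      buf = bigger;
      /* sqlite3_msize() may report more than the 128 bytes requested. */
      printf("usable size: %llu bytes\n", (unsigned long long)sqlite3_msize(buf));
      sqlite3_free(buf);
    }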
-*/ -SQLITE_API sqlite3_int64 sqlite3_memory_used(void); -SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag); - -/* -** CAPI3REF: Pseudo-Random Number Generator -** -** SQLite contains a high-quality pseudo-random number generator (PRNG) used to -** select random [ROWID | ROWIDs] when inserting new records into a table that -** already uses the largest possible [ROWID]. The PRNG is also used for -** the build-in random() and randomblob() SQL functions. This interface allows -** applications to access the same PRNG for other purposes. -** -** ^A call to this routine stores N bytes of randomness into buffer P. -** ^The P parameter can be a NULL pointer. -** -** ^If this routine has not been previously called or if the previous -** call had N less than one or a NULL pointer for P, then the PRNG is -** seeded using randomness obtained from the xRandomness method of -** the default [sqlite3_vfs] object. -** ^If the previous call to this routine had an N of 1 or more and a -** non-NULL P then the pseudo-randomness is generated -** internally and without recourse to the [sqlite3_vfs] xRandomness -** method. -*/ -SQLITE_API void sqlite3_randomness(int N, void *P); - -/* -** CAPI3REF: Compile-Time Authorization Callbacks -** METHOD: sqlite3 -** -** ^This routine registers an authorizer callback with a particular -** [database connection], supplied in the first argument. -** ^The authorizer callback is invoked as SQL statements are being compiled -** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], -** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. ^At various -** points during the compilation process, as logic is being created -** to perform various actions, the authorizer callback is invoked to -** see if those actions are allowed. ^The authorizer callback should -** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the -** specific action but allow the SQL statement to continue to be -** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be -** rejected with an error. ^If the authorizer callback returns -** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY] -** then the [sqlite3_prepare_v2()] or equivalent call that triggered -** the authorizer will fail with an error message. -** -** When the callback returns [SQLITE_OK], that means the operation -** requested is ok. ^When the callback returns [SQLITE_DENY], the -** [sqlite3_prepare_v2()] or equivalent call that triggered the -** authorizer will fail with an error message explaining that -** access is denied. -** -** ^The first parameter to the authorizer callback is a copy of the third -** parameter to the sqlite3_set_authorizer() interface. ^The second parameter -** to the callback is an integer [SQLITE_COPY | action code] that specifies -** the particular action to be authorized. ^The third through sixth parameters -** to the callback are zero-terminated strings that contain additional -** details about the action to be authorized. -** -** ^If the action code is [SQLITE_READ] -** and the callback returns [SQLITE_IGNORE] then the -** [prepared statement] statement is constructed to substitute -** a NULL value in place of the table column that would have -** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE] -** return can be used to deny an untrusted user access to individual -** columns of a table. 
-** ^If the action code is [SQLITE_DELETE] and the callback returns -** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the -** [truncate optimization] is disabled and all rows are deleted individually. -** -** An authorizer is used when [sqlite3_prepare | preparing] -** SQL statements from an untrusted source, to ensure that the SQL statements -** do not try to access data they are not allowed to see, or that they do not -** try to execute malicious statements that damage the database. For -** example, an application may allow a user to enter arbitrary -** SQL queries for evaluation by a database. But the application does -** not want the user to be able to make arbitrary changes to the -** database. An authorizer could then be put in place while the -** user-entered SQL is being [sqlite3_prepare | prepared] that -** disallows everything except [SELECT] statements. -** -** Applications that need to process SQL from untrusted sources -** might also consider lowering resource limits using [sqlite3_limit()] -** and limiting database size using the [max_page_count] [PRAGMA] -** in addition to using an authorizer. -** -** ^(Only a single authorizer can be in place on a database connection -** at a time. Each call to sqlite3_set_authorizer overrides the -** previous call.)^ ^Disable the authorizer by installing a NULL callback. -** The authorizer is disabled by default. -** -** The authorizer callback must not do anything that will modify -** the database connection that invoked the authorizer callback. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the -** statement might be re-prepared during [sqlite3_step()] due to a -** schema change. Hence, the application should ensure that the -** correct authorizer callback remains in place during the [sqlite3_step()]. -** -** ^Note that the authorizer callback is invoked only during -** [sqlite3_prepare()] or its variants. Authorization is not -** performed during statement evaluation in [sqlite3_step()], unless -** as stated in the previous paragraph, sqlite3_step() invokes -** sqlite3_prepare_v2() to reprepare a statement after a schema change. -*/ -SQLITE_API int sqlite3_set_authorizer( - sqlite3*, - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), - void *pUserData -); - -/* -** CAPI3REF: Authorizer Return Codes -** -** The [sqlite3_set_authorizer | authorizer callback function] must -** return either [SQLITE_OK] or one of these two constants in order -** to signal SQLite whether or not the action is permitted. See the -** [sqlite3_set_authorizer | authorizer documentation] for additional -** information. -** -** Note that SQLITE_IGNORE is also used as a [conflict resolution mode] -** returned from the [sqlite3_vtab_on_conflict()] interface. -*/ -#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ -#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ - -/* -** CAPI3REF: Authorizer Action Codes -** -** The [sqlite3_set_authorizer()] interface registers a callback function -** that is invoked to authorize certain SQL statement actions. The -** second parameter to the callback is an integer code that specifies -** what action is being authorized. These are the integer action codes that -** the authorizer callback may be passed. 
-** -** These action code values signify what kind of operation is to be -** authorized. The 3rd and 4th parameters to the authorization -** callback function will be parameters or NULL depending on which of these -** codes is used as the second parameter. ^(The 5th parameter to the -** authorizer callback is the name of the database ("main", "temp", -** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback -** is the name of the inner-most trigger or view that is responsible for -** the access attempt or NULL if this access attempt is directly from -** top-level SQL code. -*/ -/******************************************* 3rd ************ 4th ***********/ -#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ -#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ -#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ -#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ -#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ -#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ -#define SQLITE_DELETE 9 /* Table Name NULL */ -#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ -#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ -#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ -#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ -#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ -#define SQLITE_DROP_VIEW 17 /* View Name NULL */ -#define SQLITE_INSERT 18 /* Table Name NULL */ -#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ -#define SQLITE_READ 20 /* Table Name Column Name */ -#define SQLITE_SELECT 21 /* NULL NULL */ -#define SQLITE_TRANSACTION 22 /* Operation NULL */ -#define SQLITE_UPDATE 23 /* Table Name Column Name */ -#define SQLITE_ATTACH 24 /* Filename NULL */ -#define SQLITE_DETACH 25 /* Database Name NULL */ -#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ -#define SQLITE_REINDEX 27 /* Index Name NULL */ -#define SQLITE_ANALYZE 28 /* Table Name NULL */ -#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ -#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ -#define SQLITE_FUNCTION 31 /* NULL Function Name */ -#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ -#define SQLITE_COPY 0 /* No longer used */ -#define SQLITE_RECURSIVE 33 /* NULL NULL */ - -/* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 -** -** These routines are deprecated. Use the [sqlite3_trace_v2()] interface -** instead of the routines described here. -** -** These routines register callback functions that can be used for -** tracing and profiling the execution of SQL statements. -** -** ^The callback function registered by sqlite3_trace() is invoked at -** various times when an SQL statement is being run by [sqlite3_step()]. -** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the -** SQL statement text as the statement first begins executing. -** ^(Additional sqlite3_trace() callbacks might occur -** as each triggered subprogram is entered. The callbacks for triggers -** contain a UTF-8 SQL comment that identifies the trigger.)^ -** -** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit -** the length of [bound parameter] expansion in the output of sqlite3_trace(). 
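Combining the sqlite3_set_authorizer() interface and the action codes defined above, a sketch of a callback that lets untrusted SQL read data but refuses anything that would modify the database (an illustrative pattern, not code from this project):

    #include <sqlite3.h>

    /* Permit plain queries; deny every action that could modify the
    ** database.  Returning SQLITE_DENY makes preparation fail. */
    static int readonly_auth(void *pUserData, int action,
                             const char *arg3, const char *arg4,
                             const char *zDb, const char *zTrigger) {
      (void)pUserData; (void)arg3; (void)arg4; (void)zDb; (void)zTrigger;
      switch (action) {
        case SQLITE_SELECT:
        case SQLITE_READ:
        case SQLITE_FUNCTION:
          return SQLITE_OK;
        default:
          return SQLITE_DENY;
      }
    }

    /* Installed with: sqlite3_set_authorizer(db, readonly_auth, NULL); */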
-** -** ^The callback function registered by sqlite3_profile() is invoked -** as each SQL statement finishes. ^The profile callback contains -** the original statement text and an estimate of wall-clock time -** of how long that statement took to run. ^The profile callback -** time is in units of nanoseconds, however the current implementation -** is only capable of millisecond resolution so the six least significant -** digits in the time are meaningless. Future versions of SQLite -** might provide greater resolution on the profiler callback. The -** sqlite3_profile() function is considered experimental and is -** subject to change in future versions of SQLite. -*/ -SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*, - void(*xTrace)(void*,const char*), void*); -SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, - void(*xProfile)(void*,const char*,sqlite3_uint64), void*); - -/* -** CAPI3REF: SQL Trace Event Codes -** KEYWORDS: SQLITE_TRACE -** -** These constants identify classes of events that can be monitored -** using the [sqlite3_trace_v2()] tracing logic. The third argument -** to [sqlite3_trace_v2()] is an OR-ed combination of one or more of -** the following constants. ^The first argument to the trace callback -** is one of the following constants. -** -** New tracing constants may be added in future releases. -** -** ^A trace callback has four arguments: xCallback(T,C,P,X). -** ^The T argument is one of the integer type codes above. -** ^The C argument is a copy of the context pointer passed in as the -** fourth argument to [sqlite3_trace_v2()]. -** The P and X arguments are pointers whose meanings depend on T. -** -**
-** [[SQLITE_TRACE_STMT]]
SQLITE_TRACE_STMT
-**
^An SQLITE_TRACE_STMT callback is invoked when a prepared statement -** first begins running and possibly at other times during the -** execution of the prepared statement, such as at the start of each -** trigger subprogram. ^The P argument is a pointer to the -** [prepared statement]. ^The X argument is a pointer to a string which -** is the unexpanded SQL text of the prepared statement or an SQL comment -** that indicates the invocation of a trigger. ^The callback can compute -** the same text that would have been returned by the legacy [sqlite3_trace()] -** interface by using the X argument when X begins with "--" and invoking -** [sqlite3_expanded_sql(P)] otherwise. -** -** [[SQLITE_TRACE_PROFILE]]
SQLITE_TRACE_PROFILE
-**
^An SQLITE_TRACE_PROFILE callback provides approximately the same
-** information as is provided by the [sqlite3_profile()] callback.
-** ^The P argument is a pointer to the [prepared statement] and the
-** X argument points to a 64-bit integer which is an estimate of
-** the number of nanoseconds that the prepared statement took to run.
-** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
-**
-** [[SQLITE_TRACE_ROW]]
SQLITE_TRACE_ROW
-**
^An SQLITE_TRACE_ROW callback is invoked whenever a prepared -** statement generates a single row of result. -** ^The P argument is a pointer to the [prepared statement] and the -** X argument is unused. -** -** [[SQLITE_TRACE_CLOSE]]
SQLITE_TRACE_CLOSE
-**
^An SQLITE_TRACE_CLOSE callback is invoked when a database -** connection closes. -** ^The P argument is a pointer to the [database connection] object -** and the X argument is unused. -**
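A sketch of a statement-level trace hook built on the constants just described and registered through sqlite3_trace_v2(), which is declared just below; for SQLITE_TRACE_STMT the X argument is either a trigger comment (starting with "--") or the unexpanded SQL:

    #include <sqlite3.h>
    #include <stdio.h>

    static int trace_cb(unsigned type, void *ctx, void *p, void *x) {
      (void)ctx;
      if (type == SQLITE_TRACE_STMT) {
        const char *sql = (const char*)x;
        if (sql && sql[0] == '-' && sql[1] == '-') {
          fprintf(stderr, "trigger: %s\n", sql);      /* trigger comment */
        } else {
          char *expanded = sqlite3_expanded_sql((sqlite3_stmt*)p);
          fprintf(stderr, "sql: %s\n", expanded ? expanded : sql);
          sqlite3_free(expanded);
        }
      }
      return 0;
    }

    /* Registered with: sqlite3_trace_v2(db, SQLITE_TRACE_STMT, trace_cb, NULL); */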
-*/ -#define SQLITE_TRACE_STMT 0x01 -#define SQLITE_TRACE_PROFILE 0x02 -#define SQLITE_TRACE_ROW 0x04 -#define SQLITE_TRACE_CLOSE 0x08 - -/* -** CAPI3REF: SQL Trace Hook -** METHOD: sqlite3 -** -** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback -** function X against [database connection] D, using property mask M -** and context pointer P. ^If the X callback is -** NULL or if the M mask is zero, then tracing is disabled. The -** M argument should be the bitwise OR-ed combination of -** zero or more [SQLITE_TRACE] constants. -** -** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides -** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). -** -** ^The X callback is invoked whenever any of the events identified by -** mask M occur. ^The integer return value from the callback is currently -** ignored, though this may change in future releases. Callback -** implementations should return zero to ensure future compatibility. -** -** ^A trace callback is invoked with four arguments: callback(T,C,P,X). -** ^The T argument is one of the [SQLITE_TRACE] -** constants to indicate why the callback was invoked. -** ^The C argument is a copy of the context pointer. -** The P and X arguments are pointers whose meanings depend on T. -** -** The sqlite3_trace_v2() interface is intended to replace the legacy -** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which -** are deprecated. -*/ -SQLITE_API int sqlite3_trace_v2( - sqlite3*, - unsigned uMask, - int(*xCallback)(unsigned,void*,void*,void*), - void *pCtx -); - -/* -** CAPI3REF: Query Progress Callbacks -** METHOD: sqlite3 -** -** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback -** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for -** database connection D. An example use for this -** interface is to keep a GUI updated during a large query. -** -** ^The parameter P is passed through as the only parameter to the -** callback function X. ^The parameter N is the approximate number of -** [virtual machine instructions] that are evaluated between successive -** invocations of the callback X. ^If N is less than one then the progress -** handler is disabled. -** -** ^Only a single progress handler may be defined at one time per -** [database connection]; setting a new progress handler cancels the -** old one. ^Setting parameter X to NULL disables the progress handler. -** ^The progress handler is also disabled by setting N to a value less -** than 1. -** -** ^If the progress callback returns non-zero, the operation is -** interrupted. This feature can be used to implement a -** "Cancel" button on a GUI progress dialog box. -** -** The progress handler callback must not do anything that will modify -** the database connection that invoked the progress handler. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -*/ -SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); - -/* -** CAPI3REF: Opening A New Database Connection -** CONSTRUCTOR: sqlite3 -** -** ^These routines open an SQLite database file as specified by the -** filename argument. ^The filename argument is interpreted as UTF-8 for -** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte -** order for sqlite3_open16(). 
^(A [database connection] handle is usually -** returned in *ppDb, even if an error occurs. The only exception is that -** if SQLite is unable to allocate memory to hold the [sqlite3] object, -** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] -** object.)^ ^(If the database is opened (and/or created) successfully, then -** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The -** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain -** an English language description of the error following a failure of any -** of the sqlite3_open() routines. -** -** ^The default encoding will be UTF-8 for databases created using -** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases -** created using sqlite3_open16() will be UTF-16 in the native byte order. -** -** Whether or not an error occurs when it is opened, resources -** associated with the [database connection] handle should be released by -** passing it to [sqlite3_close()] when it is no longer required. -** -** The sqlite3_open_v2() interface works like sqlite3_open() -** except that it accepts two additional parameters for additional control -** over the new database connection. ^(The flags parameter to -** sqlite3_open_v2() can take one of -** the following three values, optionally combined with the -** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE], -** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^ -** -**
-** ^(
[SQLITE_OPEN_READONLY]
-**
The database is opened in read-only mode. If the database does not -** already exist, an error is returned.
)^ -** -** ^(
[SQLITE_OPEN_READWRITE]
-**
The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.
)^ -** -** ^(
[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
-**
The database is opened for reading and writing, and is created if -** it does not already exist. This is the behavior that is always used for -** sqlite3_open() and sqlite3_open16().
)^ -**
-** -** If the 3rd parameter to sqlite3_open_v2() is not one of the -** combinations shown above optionally combined with other -** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] -** then the behavior is undefined. -** -** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection -** opens in the multi-thread [threading mode] as long as the single-thread -** mode has not been set at compile-time or start-time. ^If the -** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens -** in the serialized [threading mode] unless single-thread was -** previously selected at compile-time or start-time. -** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be -** eligible to use [shared cache mode], regardless of whether or not shared -** cache is enabled using [sqlite3_enable_shared_cache()]. ^The -** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not -** participate in [shared cache mode] even if it is enabled. -** -** ^The fourth parameter to sqlite3_open_v2() is the name of the -** [sqlite3_vfs] object that defines the operating system interface that -** the new database connection should use. ^If the fourth parameter is -** a NULL pointer then the default [sqlite3_vfs] object is used. -** -** ^If the filename is ":memory:", then a private, temporary in-memory database -** is created for the connection. ^This in-memory database will vanish when -** the database connection is closed. Future versions of SQLite might -** make use of additional special filenames that begin with the ":" character. -** It is recommended that when a database filename actually does begin with -** a ":" character you should prefix the filename with a pathname such as -** "./" to avoid ambiguity. -** -** ^If the filename is an empty string, then a private, temporary -** on-disk database will be created. ^This private database will be -** automatically deleted as soon as the database connection is closed. -** -** [[URI filenames in sqlite3_open()]]

URI Filenames

-** -** ^If [URI filename] interpretation is enabled, and the filename argument -** begins with "file:", then the filename is interpreted as a URI. ^URI -** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is -** set in the fourth argument to sqlite3_open_v2(), or if it has -** been enabled globally using the [SQLITE_CONFIG_URI] option with the -** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. -** As of SQLite version 3.7.7, URI filename interpretation is turned off -** by default, but future releases of SQLite might enable URI filename -** interpretation by default. See "[URI filenames]" for additional -** information. -** -** URI filenames are parsed according to RFC 3986. ^If the URI contains an -** authority, then it must be either an empty string or the string -** "localhost". ^If the authority is not an empty string or "localhost", an -** error is returned to the caller. ^The fragment component of a URI, if -** present, is ignored. -** -** ^SQLite uses the path component of the URI as the name of the disk file -** which contains the database. ^If the path begins with a '/' character, -** then it is interpreted as an absolute path. ^If the path does not begin -** with a '/' (meaning that the authority section is omitted from the URI) -** then the path is interpreted as a relative path. -** ^(On windows, the first component of an absolute path -** is a drive specification (e.g. "C:").)^ -** -** [[core URI query parameters]] -** The query component of a URI may contain parameters that are interpreted -** either by SQLite itself, or by a [VFS | custom VFS implementation]. -** SQLite and its built-in [VFSes] interpret the -** following query parameters: -** -**
    -**
  • vfs: ^The "vfs" parameter may be used to specify the name of -** a VFS object that provides the operating system interface that should -** be used to access the database file on disk. ^If this option is set to -** an empty string the default VFS object is used. ^Specifying an unknown -** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is -** present, then the VFS specified by the option takes precedence over -** the value passed as the fourth parameter to sqlite3_open_v2(). -** -**
  • mode: ^(The mode parameter may be set to either "ro", "rw", -** "rwc", or "memory". Attempting to set it to any other value is -** an error)^. -** ^If "ro" is specified, then the database is opened for read-only -** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the -** third argument to sqlite3_open_v2(). ^If the mode option is set to -** "rw", then the database is opened for read-write (but not create) -** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had -** been set. ^Value "rwc" is equivalent to setting both -** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is -** set to "memory" then a pure [in-memory database] that never reads -** or writes from disk is used. ^It is an error to specify a value for -** the mode parameter that is less restrictive than that specified by -** the flags passed in the third parameter to sqlite3_open_v2(). -** -**
  • cache: ^The cache parameter may be set to either "shared" or -** "private". ^Setting it to "shared" is equivalent to setting the -** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to -** sqlite3_open_v2(). ^Setting the cache parameter to "private" is -** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit. -** ^If sqlite3_open_v2() is used and the "cache" parameter is present in -** a URI filename, its value overrides any behavior requested by setting -** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag. -** -**
  • psow: ^The psow parameter indicates whether or not the -** [powersafe overwrite] property does or does not apply to the -** storage media on which the database file resides. -** -**
  • nolock: ^The nolock parameter is a boolean query parameter -** which if set disables file locking in rollback journal modes. This -** is useful for accessing a database on a filesystem that does not -** support locking. Caution: Database corruption might result if two -** or more processes write to the same database and any one of those -** processes uses nolock=1. -** -**
  • immutable: ^The immutable parameter is a boolean query -** parameter that indicates that the database file is stored on -** read-only media. ^When immutable is set, SQLite assumes that the -** database file cannot be changed, even by a process with higher -** privilege, and so the database is opened read-only and all locking -** and change detection is disabled. Caution: Setting the immutable -** property on a database file that does in fact change can result -** in incorrect query results and/or [SQLITE_CORRUPT] errors. -** See also: [SQLITE_IOCAP_IMMUTABLE]. -** -**
-** -** ^Specifying an unknown parameter in the query component of a URI is not an -** error. Future versions of SQLite might understand additional query -** parameters. See "[query parameters with special meaning to SQLite]" for -** additional information. -** -** [[URI filename examples]]

URI filename examples

-** -** -**
-** URI filenames and their results:
-**
-**   file:data.db
-**       Open the file "data.db" in the current directory.
-**
-**   file:/home/fred/data.db
-**   file:///home/fred/data.db
-**   file://localhost/home/fred/data.db
-**       Open the database file "/home/fred/data.db".
-**
-**   file://darkstar/home/fred/data.db
-**       An error. "darkstar" is not a recognized authority.
-**
-**   file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
-**       Windows only: Open the file "data.db" on fred's desktop on drive
-**       C:. Note that the %20 escaping in this example is not strictly
-**       necessary - space characters can be used literally in URI filenames.
-**
-**   file:data.db?mode=ro&cache=private
-**       Open file "data.db" in the current directory for read-only access.
-**       Regardless of whether or not shared-cache mode is enabled by
-**       default, use a private cache.
-**
-**   file:/home/fred/data.db?vfs=unix-dotfile
-**       Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
-**       that uses dot-files in place of posix advisory locking.
-**
-**   file:data.db?mode=readonly
-**       An error. "readonly" is not a valid option for the "mode" parameter.
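For instance, the read-only URI from the list above could be opened like this (a sketch using sqlite3_open_v2(), declared just below; the SQLITE_OPEN_URI flag enables URI interpretation for the call as described earlier):

    #include <sqlite3.h>
    #include <stdio.h>

    static sqlite3 *open_readonly(void) {
      sqlite3 *db = NULL;
      int rc = sqlite3_open_v2("file:data.db?mode=ro&cache=private", &db,
                               SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, NULL);
      if (rc != SQLITE_OK) {
        /* A handle is usually returned even on failure, so the error can
        ** still be inspected before the handle is closed. */
        fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
        sqlite3_close(db);
        return NULL;
      }
      return db;
    }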
-** -** ^URI hexadecimal escape sequences (%HH) are supported within the path and -** query components of a URI. A hexadecimal escape sequence consists of a -** percent sign - "%" - followed by exactly two hexadecimal digits -** specifying an octet value. ^Before the path or query components of a -** URI filename are interpreted, they are encoded using UTF-8 and all -** hexadecimal escape sequences replaced by a single byte containing the -** corresponding octet. If this process generates an invalid UTF-8 encoding, -** the results are undefined. -** -** Note to Windows users: The encoding used for the filename argument -** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever -** codepage is currently defined. Filenames containing international -** characters must be converted to UTF-8 prior to passing them into -** sqlite3_open() or sqlite3_open_v2(). -** -** Note to Windows Runtime users: The temporary directory must be set -** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various -** features that require the use of temporary files may fail. -** -** See also: [sqlite3_temp_directory] -*/ -SQLITE_API int sqlite3_open( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open16( - const void *filename, /* Database filename (UTF-16) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open_v2( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb, /* OUT: SQLite db handle */ - int flags, /* Flags */ - const char *zVfs /* Name of VFS module to use */ -); - -/* -** CAPI3REF: Obtain Values For URI Parameters -** -** These are utility routines, useful to VFS implementations, that check -** to see if a database file was a URI that contained a specific query -** parameter, and if so obtains the value of that query parameter. -** -** If F is the database filename pointer passed into the xOpen() method of -** a VFS implementation when the flags parameter to xOpen() has one or -** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and -** P is the name of the query parameter, then -** sqlite3_uri_parameter(F,P) returns the value of the P -** parameter if it exists or a NULL pointer if P does not appear as a -** query parameter on F. If P is a query parameter of F -** has no explicit value, then sqlite3_uri_parameter(F,P) returns -** a pointer to an empty string. -** -** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean -** parameter and returns true (1) or false (0) according to the value -** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the -** value of query parameter P is one of "yes", "true", or "on" in any -** case or if the value begins with a non-zero number. The -** sqlite3_uri_boolean(F,P,B) routines returns false (0) if the value of -** query parameter P is one of "no", "false", or "off" in any case or -** if the value begins with a numeric zero. If P is not a query -** parameter on F or if the value of P is does not match any of the -** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0). -** -** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a -** 64-bit signed integer and returns that integer, or D if P does not -** exist. If the value of P is something other than an integer, then -** zero is returned. -** -** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and -** sqlite3_uri_boolean(F,P,B) returns B. 
If F is not a NULL pointer and -** is not a database file pathname pointer that SQLite passed into the xOpen -** VFS method, then the behavior of this routine is undefined and probably -** undesirable. -*/ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); - - -/* -** CAPI3REF: Error Codes And Messages -** METHOD: sqlite3 -** -** ^If the most recent sqlite3_* API call associated with -** [database connection] D failed, then the sqlite3_errcode(D) interface -** returns the numeric [result code] or [extended result code] for that -** API call. -** If the most recent API call was successful, -** then the return value from sqlite3_errcode() is undefined. -** ^The sqlite3_extended_errcode() -** interface is the same except that it always returns the -** [extended result code] even when extended result codes are -** disabled. -** -** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. -** ^(Memory to hold the error message string is managed internally. -** The application does not need to worry about freeing the result. -** However, the error string might be overwritten or deallocated by -** subsequent calls to other SQLite interface functions.)^ -** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. -** ^(Memory to hold the error message string is managed internally -** and must not be freed by the application)^. -** -** When the serialized [threading mode] is in use, it might be the -** case that a second error occurs on a separate thread in between -** the time of the first error and the call to these interfaces. -** When that happens, the second error will be reported since these -** interfaces always report the most recent result. To avoid -** this, each thread can obtain exclusive use of the [database connection] D -** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning -** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after -** all calls to the interfaces listed here are completed. -** -** If an interface fails with SQLITE_MISUSE, that means the interface -** was invoked incorrectly by the application. In that case, the -** error code and message may or may not be set. -*/ -SQLITE_API int sqlite3_errcode(sqlite3 *db); -SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); -SQLITE_API const char *sqlite3_errmsg(sqlite3*); -SQLITE_API const void *sqlite3_errmsg16(sqlite3*); -SQLITE_API const char *sqlite3_errstr(int); - -/* -** CAPI3REF: Prepared Statement Object -** KEYWORDS: {prepared statement} {prepared statements} -** -** An instance of this object represents a single SQL statement that -** has been compiled into binary form and is ready to be evaluated. -** -** Think of each SQL statement as a separate computer program. The -** original SQL text is source code. A prepared statement object -** is the compiled object code. All SQL must be converted into a -** prepared statement before it can be run. -** -** The life-cycle of a prepared statement object usually goes like this: -** -**
    -**
-**   1. Create the prepared statement object using [sqlite3_prepare_v2()].
-**   2. Bind values to [parameters] using the sqlite3_bind_*()
-**      interfaces.
-**   3. Run the SQL by calling [sqlite3_step()] one or more times.
-**   4. Reset the prepared statement using [sqlite3_reset()] then go back
-**      to step 2. Do this zero or more times.
-**   5. Destroy the object using [sqlite3_finalize()].
-**
-** (An illustrative sketch of these steps follows this list.)
-**
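The numbered steps above map onto code roughly as follows. This is an editorial sketch, not part of the original header; the "users" table, its columns, and the query are made up for illustration.

#include <stdio.h>

static int print_adults(sqlite3 *db){
  sqlite3_stmt *stmt = 0;
  int rc = sqlite3_prepare_v2(db,                         /* 1. create          */
      "SELECT name FROM users WHERE age >= ?1;", -1, &stmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_int(stmt, 1, 18);                          /* 2. bind            */
  while( (rc = sqlite3_step(stmt))==SQLITE_ROW ){         /* 3. run             */
    printf("%s\n", (const char*)sqlite3_column_text(stmt, 0));
  }
  sqlite3_reset(stmt);                                    /* 4. reset before    */
                                                          /*    any re-use      */
  sqlite3_finalize(stmt);                                 /* 5. destroy         */
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}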
-*/ -typedef struct sqlite3_stmt sqlite3_stmt; - -/* -** CAPI3REF: Run-time Limits -** METHOD: sqlite3 -** -** ^(This interface allows the size of various constructs to be limited -** on a connection by connection basis. The first parameter is the -** [database connection] whose limit is to be set or queried. The -** second parameter is one of the [limit categories] that define a -** class of constructs to be size limited. The third parameter is the -** new limit for that construct.)^ -** -** ^If the new limit is a negative number, the limit is unchanged. -** ^(For each limit category SQLITE_LIMIT_NAME there is a -** [limits | hard upper bound] -** set at compile-time by a C preprocessor macro called -** [limits | SQLITE_MAX_NAME]. -** (The "_LIMIT_" in the name is changed to "_MAX_".))^ -** ^Attempts to increase a limit above its hard upper bound are -** silently truncated to the hard upper bound. -** -** ^Regardless of whether or not the limit was changed, the -** [sqlite3_limit()] interface returns the prior value of the limit. -** ^Hence, to find the current value of a limit without changing it, -** simply invoke this interface with the third parameter set to -1. -** -** Run-time limits are intended for use in applications that manage -** both their own internal database and also databases that are controlled -** by untrusted external sources. An example application might be a -** web browser that has its own databases for storing history and -** separate databases controlled by JavaScript applications downloaded -** off the Internet. The internal databases can be given the -** large, default limits. Databases managed by external sources can -** be given much smaller limits designed to prevent a denial of service -** attack. Developers might also want to use the [sqlite3_set_authorizer()] -** interface to further control untrusted SQL. The size of the database -** created by an untrusted script can be contained using the -** [max_page_count] [PRAGMA]. -** -** New run-time limit categories may be added in future releases. -*/ -SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); - -/* -** CAPI3REF: Run-Time Limit Categories -** KEYWORDS: {limit category} {*limit categories} -** -** These constants define various performance limits -** that can be lowered at run-time using [sqlite3_limit()]. -** The synopsis of the meanings of the various limits is shown below. -** Additional information is available at [limits | Limits in SQLite]. -** -**
-** [[SQLITE_LIMIT_LENGTH]] ^(SQLITE_LIMIT_LENGTH
-** The maximum size of any string or BLOB or table row, in bytes.)^
-**
-** [[SQLITE_LIMIT_SQL_LENGTH]] ^(SQLITE_LIMIT_SQL_LENGTH
-** The maximum length of an SQL statement, in bytes.)^
-**
-** [[SQLITE_LIMIT_COLUMN]] ^(SQLITE_LIMIT_COLUMN
-** The maximum number of columns in a table definition or in the
-** result set of a [SELECT] or the maximum number of columns in an index
-** or in an ORDER BY or GROUP BY clause.)^
-**
-** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(SQLITE_LIMIT_EXPR_DEPTH
-** The maximum depth of the parse tree on any expression.)^
-**
-** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(SQLITE_LIMIT_COMPOUND_SELECT
-** The maximum number of terms in a compound SELECT statement.)^
-**
-** [[SQLITE_LIMIT_VDBE_OP]] ^(SQLITE_LIMIT_VDBE_OP
-** The maximum number of instructions in a virtual machine program
-** used to implement an SQL statement. This limit is not currently
-** enforced, though that might be added in some future release of
-** SQLite.)^
-**
-** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(SQLITE_LIMIT_FUNCTION_ARG
-** The maximum number of arguments on a function.)^
-**
-** [[SQLITE_LIMIT_ATTACHED]] ^(SQLITE_LIMIT_ATTACHED
-** The maximum number of [ATTACH | attached databases].)^
-**
-** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]]
-** ^(SQLITE_LIMIT_LIKE_PATTERN_LENGTH
-** The maximum length of the pattern argument to the [LIKE] or
-** [GLOB] operators.)^
-**
-** [[SQLITE_LIMIT_VARIABLE_NUMBER]]
-** ^(SQLITE_LIMIT_VARIABLE_NUMBER
-** The maximum index number of any [parameter] in an SQL statement.)^
-**
-** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(SQLITE_LIMIT_TRIGGER_DEPTH
-** The maximum depth of recursion for triggers.)^
-**
-** [[SQLITE_LIMIT_WORKER_THREADS]] ^(SQLITE_LIMIT_WORKER_THREADS
-** The maximum number of auxiliary worker threads that a single
-** [prepared statement] may start.)^
-**
-** (An illustrative sqlite3_limit() sketch follows this list.)
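As an editorial sketch (not part of the original header), a connection that will run untrusted SQL might be restricted like this; the particular values are arbitrary.

static void restrict_untrusted(sqlite3 *db){
  sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 10000);        /* cap statement size   */
  sqlite3_limit(db, SQLITE_LIMIT_VDBE_OP, 25000);           /* cap program size     */
  sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, 0);              /* forbid ATTACH        */
  int cur = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1); /* negative: query only */
  (void)cur;
}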
-*/ -#define SQLITE_LIMIT_LENGTH 0 -#define SQLITE_LIMIT_SQL_LENGTH 1 -#define SQLITE_LIMIT_COLUMN 2 -#define SQLITE_LIMIT_EXPR_DEPTH 3 -#define SQLITE_LIMIT_COMPOUND_SELECT 4 -#define SQLITE_LIMIT_VDBE_OP 5 -#define SQLITE_LIMIT_FUNCTION_ARG 6 -#define SQLITE_LIMIT_ATTACHED 7 -#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 -#define SQLITE_LIMIT_VARIABLE_NUMBER 9 -#define SQLITE_LIMIT_TRIGGER_DEPTH 10 -#define SQLITE_LIMIT_WORKER_THREADS 11 - -/* -** CAPI3REF: Compiling An SQL Statement -** KEYWORDS: {SQL statement compiler} -** METHOD: sqlite3 -** CONSTRUCTOR: sqlite3_stmt -** -** To execute an SQL query, it must first be compiled into a byte-code -** program using one of these routines. -** -** The first argument, "db", is a [database connection] obtained from a -** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or -** [sqlite3_open16()]. The database connection must not have been closed. -** -** The second argument, "zSql", is the statement to be compiled, encoded -** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() -** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() -** use UTF-16. -** -** ^If the nByte argument is negative, then zSql is read up to the -** first zero terminator. ^If nByte is positive, then it is the -** number of bytes read from zSql. ^If nByte is zero, then no prepared -** statement is generated. -** If the caller knows that the supplied string is nul-terminated, then -** there is a small performance advantage to passing an nByte parameter that -** is the number of bytes in the input string including -** the nul-terminator. -** -** ^If pzTail is not NULL then *pzTail is made to point to the first byte -** past the end of the first SQL statement in zSql. These routines only -** compile the first statement in zSql, so *pzTail is left pointing to -** what remains uncompiled. -** -** ^*ppStmt is left pointing to a compiled [prepared statement] that can be -** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set -** to NULL. ^If the input text contains no SQL (if the input is an empty -** string or a comment) then *ppStmt is set to NULL. -** The calling procedure is responsible for deleting the compiled -** SQL statement using [sqlite3_finalize()] after it has finished with it. -** ppStmt may not be NULL. -** -** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; -** otherwise an [error code] is returned. -** -** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are -** recommended for all new programs. The two older interfaces are retained -** for backwards compatibility, but their use is discouraged. -** ^In the "v2" interfaces, the prepared statement -** that is returned (the [sqlite3_stmt] object) contains a copy of the -** original SQL text. This causes the [sqlite3_step()] interface to -** behave differently in three ways: -** -**
    -**
-**   1. ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
-**      always used to do, [sqlite3_step()] will automatically recompile the SQL
-**      statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
-**      retries will occur before sqlite3_step() gives up and returns an error.
-**
-**   2. ^When an error occurs, [sqlite3_step()] will return one of the detailed
-**      [error codes] or [extended error codes]. ^The legacy behavior was that
-**      [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
-**      and the application would have to make a second call to [sqlite3_reset()]
-**      in order to find the underlying cause of the problem. With the "v2" prepare
-**      interfaces, the underlying reason for the error is returned immediately.
-**      (See the illustrative sketch following this list.)
-**
-**   3. ^If the specific value bound to [parameter | host parameter] in the
-**      WHERE clause might influence the choice of query plan for a statement,
-**      then the statement will be automatically recompiled, as if there had been
-**      a schema change, on the first [sqlite3_step()] call following any change
-**      to the [sqlite3_bind_text | bindings] of that [parameter].
-**      ^The specific value of WHERE-clause [parameter] might influence the
-**      choice of query plan if the parameter is the left-hand side of a [LIKE]
-**      or [GLOB] operator or if the parameter is compared to an indexed column
-**      and the [SQLITE_ENABLE_STAT3] compile-time option is enabled.
-**
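A brief editorial sketch of point 2 above (not part of the original header): with the "v2" interface the specific failure code surfaces directly from sqlite3_step().

#include <stdio.h>

static int exec_one(sqlite3 *db, const char *zSql){
  sqlite3_stmt *stmt = 0;
  int rc = sqlite3_prepare_v2(db, zSql, -1, &stmt, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_step(stmt);   /* e.g. SQLITE_CONSTRAINT rather than a bare SQLITE_ERROR */
    if( rc==SQLITE_ROW || rc==SQLITE_DONE ){
      rc = SQLITE_OK;
    }else{
      fprintf(stderr, "step: %d (%s)\n", rc, sqlite3_errmsg(db));
    }
  }
  sqlite3_finalize(stmt);      /* finalizing a NULL statement is a harmless no-op */
  return rc;
}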
-*/ -SQLITE_API int sqlite3_prepare( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare_v2( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16_v2( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); - -/* -** CAPI3REF: Retrieving Statement SQL -** METHOD: sqlite3_stmt -** -** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8 -** SQL text used to create [prepared statement] P if P was -** created by either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. -** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8 -** string containing the SQL text of prepared statement P with -** [bound parameters] expanded. -** -** ^(For example, if a prepared statement is created using the SQL -** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345 -** and parameter :xyz is unbound, then sqlite3_sql() will return -** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql() -** will return "SELECT 2345,NULL".)^ -** -** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory -** is available to hold the result, or if the result would exceed the -** the maximum string length determined by the [SQLITE_LIMIT_LENGTH]. -** -** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of -** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time -** option causes sqlite3_expanded_sql() to always return NULL. -** -** ^The string returned by sqlite3_sql(P) is managed by SQLite and is -** automatically freed when the prepared statement is finalized. -** ^The string returned by sqlite3_expanded_sql(P), on the other hand, -** is obtained from [sqlite3_malloc()] and must be free by the application -** by passing it to [sqlite3_free()]. -*/ -SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); -SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If An SQL Statement Writes The Database -** METHOD: sqlite3_stmt -** -** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if -** and only if the [prepared statement] X makes no direct changes to -** the content of the database file. -** -** Note that [application-defined SQL functions] or -** [virtual tables] might change the database indirectly as a side effect. -** ^(For example, if an application defines a function "eval()" that -** calls [sqlite3_exec()], then the following SQL statement would -** change the database file through side-effects: -** -**
-**    SELECT eval('DELETE FROM t1') FROM t2;
-** 
-** -** But because the [SELECT] statement does not change the database file -** directly, sqlite3_stmt_readonly() would still return true.)^ -** -** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], -** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, -** since the statements themselves do not actually modify the database but -** rather they control the timing of when other statements modify the -** database. ^The [ATTACH] and [DETACH] statements also cause -** sqlite3_stmt_readonly() to return true since, while those statements -** change the configuration of a database connection, they do not make -** changes to the content of the database files on disk. -** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since -** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and -** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so -** sqlite3_stmt_readonly() returns false for those commands. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If A Prepared Statement Has Been Reset -** METHOD: sqlite3_stmt -** -** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the -** [prepared statement] S has been stepped at least once using -** [sqlite3_step(S)] but has neither run to completion (returned -** [SQLITE_DONE] from [sqlite3_step(S)]) nor -** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) -** interface returns false if S is a NULL pointer. If S is not a -** NULL pointer and is not a pointer to a valid [prepared statement] -** object, then the behavior is undefined and probably undesirable. -** -** This interface can be used in combination [sqlite3_next_stmt()] -** to locate all prepared statements associated with a database -** connection that are in need of being reset. This can be used, -** for example, in diagnostic routines to search for prepared -** statements that are holding a transaction open. -*/ -SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); - -/* -** CAPI3REF: Dynamically Typed Value Object -** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} -** -** SQLite uses the sqlite3_value object to represent all values -** that can be stored in a database table. SQLite uses dynamic typing -** for the values it stores. ^Values stored in sqlite3_value objects -** can be integers, floating point values, strings, BLOBs, or NULL. -** -** An sqlite3_value object may be either "protected" or "unprotected". -** Some interfaces require a protected sqlite3_value. Other interfaces -** will accept either a protected or an unprotected sqlite3_value. -** Every interface that accepts sqlite3_value arguments specifies -** whether or not it requires a protected sqlite3_value. The -** [sqlite3_value_dup()] interface can be used to construct a new -** protected sqlite3_value from an unprotected sqlite3_value. -** -** The terms "protected" and "unprotected" refer to whether or not -** a mutex is held. An internal mutex is held for a protected -** sqlite3_value object but no mutex is held for an unprotected -** sqlite3_value object. If SQLite is compiled to be single-threaded -** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) -** or if SQLite is run in one of reduced mutex modes -** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] -** then there is no distinction between protected and unprotected -** sqlite3_value objects and they can be used interchangeably. 
However, -** for maximum code portability it is recommended that applications -** still make the distinction between protected and unprotected -** sqlite3_value objects even when not strictly required. -** -** ^The sqlite3_value objects that are passed as parameters into the -** implementation of [application-defined SQL functions] are protected. -** ^The sqlite3_value object returned by -** [sqlite3_column_value()] is unprotected. -** Unprotected sqlite3_value objects may only be used with -** [sqlite3_result_value()] and [sqlite3_bind_value()]. -** The [sqlite3_value_blob | sqlite3_value_type()] family of -** interfaces require protected sqlite3_value objects. -*/ -typedef struct Mem sqlite3_value; - -/* -** CAPI3REF: SQL Function Context Object -** -** The context in which an SQL function executes is stored in an -** sqlite3_context object. ^A pointer to an sqlite3_context object -** is always first parameter to [application-defined SQL functions]. -** The application-defined SQL function implementation will pass this -** pointer through into calls to [sqlite3_result_int | sqlite3_result()], -** [sqlite3_aggregate_context()], [sqlite3_user_data()], -** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], -** and/or [sqlite3_set_auxdata()]. -*/ -typedef struct sqlite3_context sqlite3_context; - -/* -** CAPI3REF: Binding Values To Prepared Statements -** KEYWORDS: {host parameter} {host parameters} {host parameter name} -** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} -** METHOD: sqlite3_stmt -** -** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following -** templates: -** -**
    -**
-**   • ?
-**   • ?NNN
-**   • :VVV
-**   • @VVV
-**   • $VVV
-** -** In the templates above, NNN represents an integer literal, -** and VVV represents an alphanumeric identifier.)^ ^The values of these -** parameters (also called "host parameter names" or "SQL parameters") -** can be set using the sqlite3_bind_*() routines defined here. -** -** ^The first argument to the sqlite3_bind_*() routines is always -** a pointer to the [sqlite3_stmt] object returned from -** [sqlite3_prepare_v2()] or its variants. -** -** ^The second argument is the index of the SQL parameter to be set. -** ^The leftmost SQL parameter has an index of 1. ^When the same named -** SQL parameter is used more than once, second and subsequent -** occurrences have the same index as the first occurrence. -** ^The index for named parameters can be looked up using the -** [sqlite3_bind_parameter_index()] API if desired. ^The index -** for "?NNN" parameters is the value of NNN. -** ^The NNN value must be between 1 and the [sqlite3_limit()] -** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). -** -** ^The third argument is the value to bind to the parameter. -** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16() -** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter -** is ignored and the end result is the same as sqlite3_bind_null(). -** -** ^(In those routines that have a fourth argument, its value is the -** number of bytes in the parameter. To be clear: the value is the -** number of bytes in the value, not the number of characters.)^ -** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16() -** is negative, then the length of the string is -** the number of bytes up to the first zero terminator. -** If the fourth parameter to sqlite3_bind_blob() is negative, then -** the behavior is undefined. -** If a non-negative fourth parameter is provided to sqlite3_bind_text() -** or sqlite3_bind_text16() or sqlite3_bind_text64() then -** that parameter must be the byte offset -** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occur at byte offsets less than -** the value of the fourth parameter then the resulting string value will -** contain embedded NULs. The result of expressions involving strings -** with embedded NULs is undefined. -** -** ^The fifth argument to the BLOB and string binding interfaces -** is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to bind API fails. -** ^If the fifth argument is -** the special value [SQLITE_STATIC], then SQLite assumes that the -** information is in static, unmanaged space and does not need to be freed. -** ^If the fifth argument has the value [SQLITE_TRANSIENT], then -** SQLite makes its own private copy of the data immediately, before -** the sqlite3_bind_*() routine returns. -** -** ^The sixth argument to sqlite3_bind_text64() must be one of -** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE] -** to specify the encoding of the text in the third parameter. If -** the sixth argument to sqlite3_bind_text64() is not one of the -** allowed values shown above, or if the text encoding is different -** from the encoding specified by the sixth parameter, then the behavior -** is undefined. -** -** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that -** is filled with zeroes. 
^A zeroblob uses a fixed amount of memory -** (just an integer to hold its size) while it is being processed. -** Zeroblobs are intended to serve as placeholders for BLOBs whose -** content is later written using -** [sqlite3_blob_open | incremental BLOB I/O] routines. -** ^A negative value for the zeroblob results in a zero-length BLOB. -** -** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer -** for the [prepared statement] or with a prepared statement for which -** [sqlite3_step()] has been called more recently than [sqlite3_reset()], -** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() -** routine is passed a [prepared statement] that has been finalized, the -** result is undefined and probably harmful. -** -** ^Bindings are not cleared by the [sqlite3_reset()] routine. -** ^Unbound parameters are interpreted as NULL. -** -** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an -** [error code] if anything goes wrong. -** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB -** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or -** [SQLITE_MAX_LENGTH]. -** ^[SQLITE_RANGE] is returned if the parameter -** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. -** -** See also: [sqlite3_bind_parameter_count()], -** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64, - void(*)(void*)); -SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double); -SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int); -SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); -SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int); -SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*)); -SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); -SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64, - void(*)(void*), unsigned char encoding); -SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); -SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); -SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64); - -/* -** CAPI3REF: Number Of SQL Parameters -** METHOD: sqlite3_stmt -** -** ^This routine can be used to find the number of [SQL parameters] -** in a [prepared statement]. SQL parameters are tokens of the -** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as -** placeholders for values that are [sqlite3_bind_blob | bound] -** to the parameters at a later time. -** -** ^(This routine actually returns the index of the largest (rightmost) -** parameter. For all forms except ?NNN, this will correspond to the -** number of unique parameters. If parameters of the ?NNN form are used, -** there may be gaps in the list.)^ -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_name()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*); - -/* -** CAPI3REF: Name Of A Host Parameter -** METHOD: sqlite3_stmt -** -** ^The sqlite3_bind_parameter_name(P,N) interface returns -** the name of the N-th [SQL parameter] in the [prepared statement] P. 
-** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" -** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" -** respectively. -** In other words, the initial ":" or "$" or "@" or "?" -** is included as part of the name.)^ -** ^Parameters of the form "?" without a following integer have no name -** and are referred to as "nameless" or "anonymous parameters". -** -** ^The first host parameter has an index of 1, not 0. -** -** ^If the value N is out of range or if the N-th parameter is -** nameless, then NULL is returned. ^The returned string is -** always in UTF-8 encoding even if the named parameter was -** originally specified as UTF-16 in [sqlite3_prepare16()] or -** [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); - -/* -** CAPI3REF: Index Of A Parameter With A Given Name -** METHOD: sqlite3_stmt -** -** ^Return the index of an SQL parameter given its name. ^The -** index value returned is suitable for use as the second -** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero -** is returned if no matching parameter is found. ^The parameter -** name must be given in UTF-8 even if the original statement -** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_name()]. -*/ -SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); - -/* -** CAPI3REF: Reset All Bindings On A Prepared Statement -** METHOD: sqlite3_stmt -** -** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset -** the [sqlite3_bind_blob | bindings] on a [prepared statement]. -** ^Use this routine to reset all host parameters to NULL. -*/ -SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); - -/* -** CAPI3REF: Number Of Columns In A Result Set -** METHOD: sqlite3_stmt -** -** ^Return the number of columns in the result set returned by the -** [prepared statement]. ^If this routine returns 0, that means the -** [prepared statement] returns no data (for example an [UPDATE]). -** ^However, just because this routine returns a positive number does not -** mean that one or more rows of data will be returned. ^A SELECT statement -** will always have a positive sqlite3_column_count() but depending on the -** WHERE clause constraints and the table content, it might return no rows. -** -** See also: [sqlite3_data_count()] -*/ -SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Column Names In A Result Set -** METHOD: sqlite3_stmt -** -** ^These routines return the name assigned to a particular column -** in the result set of a [SELECT] statement. ^The sqlite3_column_name() -** interface returns a pointer to a zero-terminated UTF-8 string -** and sqlite3_column_name16() returns a pointer to a zero-terminated -** UTF-16 string. ^The first parameter is the [prepared statement] -** that implements the [SELECT] statement. ^The second parameter is the -** column number. ^The leftmost column is number 0. 
-** -** ^The returned string pointer is valid until either the [prepared statement] -** is destroyed by [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the next call to -** sqlite3_column_name() or sqlite3_column_name16() on the same column. -** -** ^If sqlite3_malloc() fails during the processing of either routine -** (for example during a conversion from UTF-8 to UTF-16) then a -** NULL pointer is returned. -** -** ^The name of a result column is the value of the "AS" clause for -** that column, if there is an AS clause. If there is no AS clause -** then the name of the column is unspecified and may change from -** one release of SQLite to the next. -*/ -SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N); -SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); - -/* -** CAPI3REF: Source Of Data In A Query Result -** METHOD: sqlite3_stmt -** -** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in -** [SELECT] statement. -** ^The name of the database or table or column can be returned as -** either a UTF-8 or UTF-16 string. ^The _database_ routines return -** the database name, the _table_ routines return the table name, and -** the origin_ routines return the column name. -** ^The returned string is valid until the [prepared statement] is destroyed -** using [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the same information is requested -** again in a different encoding. -** -** ^The names returned are the original un-aliased names of the -** database, table, and column. -** -** ^The first argument to these interfaces is a [prepared statement]. -** ^These functions return information about the Nth result column returned by -** the statement, where N is the second function argument. -** ^The left-most column is column 0 for these routines. -** -** ^If the Nth column returned by the statement is an expression or -** subquery and is not a column value, then all of these functions return -** NULL. ^These routine might also return NULL if a memory allocation error -** occurs. ^Otherwise, they return the name of the attached database, table, -** or column that query result column was extracted from. -** -** ^As with all other SQLite APIs, those whose names end with "16" return -** UTF-16 encoded strings and the other functions return UTF-8. -** -** ^These APIs are only available if the library was compiled with the -** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol. -** -** If two or more threads call one or more of these routines against the same -** prepared statement and column at the same time then the results are -** undefined. -** -** If two or more threads call one or more -** [sqlite3_column_database_name | column metadata interfaces] -** for the same [prepared statement] and result column -** at the same time then the results are undefined. 
-*/ -SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int); -SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int); -SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); - -/* -** CAPI3REF: Declared Datatype Of A Query Result -** METHOD: sqlite3_stmt -** -** ^(The first parameter is a [prepared statement]. -** If this statement is a [SELECT] statement and the Nth column of the -** returned result set of that [SELECT] is a table column (not an -** expression or subquery) then the declared type of the table -** column is returned.)^ ^If the Nth column of the result set is an -** expression or subquery, then a NULL pointer is returned. -** ^The returned string is always UTF-8 encoded. -** -** ^(For example, given the database schema: -** -** CREATE TABLE t1(c1 VARIANT); -** -** and the following statement to be compiled: -** -** SELECT c1 + 1, c1 FROM t1; -** -** this routine would return the string "VARIANT" for the second result -** column (i==1), and a NULL pointer for the first result column (i==0).)^ -** -** ^SQLite uses dynamic run-time typing. ^So just because a column -** is declared to contain a particular type does not mean that the -** data stored in that column is of the declared type. SQLite is -** strongly typed, but the typing is dynamic not static. ^Type -** is associated with individual values, not with the containers -** used to hold those values. -*/ -SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); - -/* -** CAPI3REF: Evaluate An SQL Statement -** METHOD: sqlite3_stmt -** -** After a [prepared statement] has been prepared using either -** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy -** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function -** must be called one or more times to evaluate the statement. -** -** The details of the behavior of the sqlite3_step() interface depend -** on whether the statement was prepared using the newer "v2" interface -** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy -** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the -** new "v2" interface is recommended for new applications but the legacy -** interface will continue to be supported. -** -** ^In the legacy interface, the return value will be either [SQLITE_BUSY], -** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. -** ^With the "v2" interface, any of the other [result codes] or -** [extended result codes] might be returned as well. -** -** ^[SQLITE_BUSY] means that the database engine was unable to acquire the -** database locks it needs to do its job. ^If the statement is a [COMMIT] -** or occurs outside of an explicit transaction, then you can retry the -** statement. If the statement is not a [COMMIT] and occurs within an -** explicit transaction then you should rollback the transaction before -** continuing. -** -** ^[SQLITE_DONE] means that the statement has finished executing -** successfully. sqlite3_step() should not be called again on this virtual -** machine without first calling [sqlite3_reset()] to reset the virtual -** machine back to its initial state. 
-** -** ^If the SQL statement being executed returns any data, then [SQLITE_ROW] -** is returned each time a new row of data is ready for processing by the -** caller. The values may be accessed using the [column access functions]. -** sqlite3_step() is called again to retrieve the next row of data. -** -** ^[SQLITE_ERROR] means that a run-time error (such as a constraint -** violation) has occurred. sqlite3_step() should not be called again on -** the VM. More information may be found by calling [sqlite3_errmsg()]. -** ^With the legacy interface, a more specific error code (for example, -** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth) -** can be obtained by calling [sqlite3_reset()] on the -** [prepared statement]. ^In the "v2" interface, -** the more specific error code is returned directly by sqlite3_step(). -** -** [SQLITE_MISUSE] means that the this routine was called inappropriately. -** Perhaps it was called on a [prepared statement] that has -** already been [sqlite3_finalize | finalized] or on one that had -** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could -** be the case that the same database connection is being used by two or -** more threads at the same moment in time. -** -** For all versions of SQLite up to and including 3.6.23.1, a call to -** [sqlite3_reset()] was required after sqlite3_step() returned anything -** other than [SQLITE_ROW] before any subsequent invocation of -** sqlite3_step(). Failure to reset the prepared statement using -** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1], -** sqlite3_step() began -** calling [sqlite3_reset()] automatically in this circumstance rather -** than returning [SQLITE_MISUSE]. This is not considered a compatibility -** break because any application that ever receives an SQLITE_MISUSE error -** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option -** can be used to restore the legacy behavior. -** -** Goofy Interface Alert: In the legacy interface, the sqlite3_step() -** API always returns a generic error code, [SQLITE_ERROR], following any -** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call -** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the -** specific [error codes] that better describes the error. -** We admit that this is a goofy design. The problem has been fixed -** with the "v2" interface. If you prepare all of your SQL statements -** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead -** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces, -** then the more specific [error codes] are returned directly -** by sqlite3_step(). The use of the "v2" interface is recommended. -*/ -SQLITE_API int sqlite3_step(sqlite3_stmt*); - -/* -** CAPI3REF: Number of columns in a result set -** METHOD: sqlite3_stmt -** -** ^The sqlite3_data_count(P) interface returns the number of columns in the -** current row of the result set of [prepared statement] P. -** ^If prepared statement P does not have results ready to return -** (via calls to the [sqlite3_column_int | sqlite3_column_*()] of -** interfaces) then sqlite3_data_count(P) returns 0. -** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer. -** ^The sqlite3_data_count(P) routine returns 0 if the previous call to -** [sqlite3_step](P) returned [SQLITE_DONE]. 
-** ^The sqlite3_data_count(P)
-** will return non-zero if the previous call to [sqlite3_step](P) returned
-** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum]
-** where it always returns zero since each step of that multi-step
-** pragma returns 0 columns of data.
-**
-** See also: [sqlite3_column_count()]
-*/
-SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Fundamental Datatypes
-** KEYWORDS: SQLITE_TEXT
-**
-** ^(Every value in SQLite has one of five fundamental datatypes:
-**
    -**
-**   • 64-bit signed integer
-**   • 64-bit IEEE floating point number
-**   • string
-**   • BLOB
-**   • NULL
)^ -** -** These constants are codes for each of those types. -** -** Note that the SQLITE_TEXT constant was also used in SQLite version 2 -** for a completely different meaning. Software that links against both -** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not -** SQLITE_TEXT. -*/ -#define SQLITE_INTEGER 1 -#define SQLITE_FLOAT 2 -#define SQLITE_BLOB 4 -#define SQLITE_NULL 5 -#ifdef SQLITE_TEXT -# undef SQLITE_TEXT -#else -# define SQLITE_TEXT 3 -#endif -#define SQLITE3_TEXT 3 - -/* -** CAPI3REF: Result Values From A Query -** KEYWORDS: {column access functions} -** METHOD: sqlite3_stmt -** -** ^These routines return information about a single column of the current -** result row of a query. ^In every case the first argument is a pointer -** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] -** that was returned from [sqlite3_prepare_v2()] or one of its variants) -** and the second argument is the index of the column for which information -** should be returned. ^The leftmost column of the result set has the index 0. -** ^The number of columns in the result can be determined using -** [sqlite3_column_count()]. -** -** If the SQL statement does not currently point to a valid row, or if the -** column index is out of range, the result is undefined. -** These routines may only be called when the most recent call to -** [sqlite3_step()] has returned [SQLITE_ROW] and neither -** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. -** If any of these routines are called after [sqlite3_reset()] or -** [sqlite3_finalize()] or after [sqlite3_step()] has returned -** something other than [SQLITE_ROW], the results are undefined. -** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] -** are called from a different thread while any of these routines -** are pending, then the results are undefined. -** -** ^The sqlite3_column_type() routine returns the -** [SQLITE_INTEGER | datatype code] for the initial data type -** of the result column. ^The returned value is one of [SQLITE_INTEGER], -** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value -** returned by sqlite3_column_type() is only meaningful if no type -** conversions have occurred as described below. After a type conversion, -** the value returned by sqlite3_column_type() is undefined. Future -** versions of SQLite may change the behavior of sqlite3_column_type() -** following a type conversion. -** -** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts -** the string to UTF-8 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes() uses -** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns -** the number of bytes in that string. -** ^If the result is NULL, then sqlite3_column_bytes() returns zero. -** -** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts -** the string to UTF-16 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes16() uses -** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns -** the number of bytes in that string. 
-** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. -** -** ^The values returned by [sqlite3_column_bytes()] and -** [sqlite3_column_bytes16()] do not include the zero terminators at the end -** of the string. ^For clarity: the values returned by -** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of -** bytes in the string, not the number of characters. -** -** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), -** even empty strings, are always zero-terminated. ^The return -** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. -** -** Warning: ^The object returned by [sqlite3_column_value()] is an -** [unprotected sqlite3_value] object. In a multithreaded environment, -** an unprotected sqlite3_value object may only be used safely with -** [sqlite3_bind_value()] and [sqlite3_result_value()]. -** If the [unprotected sqlite3_value] object returned by -** [sqlite3_column_value()] is used in any other way, including calls -** to routines like [sqlite3_value_int()], [sqlite3_value_text()], -** or [sqlite3_value_bytes()], the behavior is not threadsafe. -** -** These routines attempt to convert the value where appropriate. ^For -** example, if the internal representation is FLOAT and a text result -** is requested, [sqlite3_snprintf()] is used internally to perform the -** conversion automatically. ^(The following table details the conversions -** that are applied: -** -**
-**
-**   Internal Type    Requested Type    Conversion
-**   -------------    --------------    -------------------------------
-**   NULL             INTEGER           Result is 0
-**   NULL             FLOAT             Result is 0.0
-**   NULL             TEXT              Result is a NULL pointer
-**   NULL             BLOB              Result is a NULL pointer
-**   INTEGER          FLOAT             Convert from integer to float
-**   INTEGER          TEXT              ASCII rendering of the integer
-**   INTEGER          BLOB              Same as INTEGER->TEXT
-**   FLOAT            INTEGER           [CAST] to INTEGER
-**   FLOAT            TEXT              ASCII rendering of the float
-**   FLOAT            BLOB              [CAST] to BLOB
-**   TEXT             INTEGER           [CAST] to INTEGER
-**   TEXT             FLOAT             [CAST] to REAL
-**   TEXT             BLOB              No change
-**   BLOB             INTEGER           [CAST] to INTEGER
-**   BLOB             FLOAT             [CAST] to REAL
-**   BLOB             TEXT              Add a zero terminator if needed
-**
)^ -** -** Note that when type conversions occur, pointers returned by prior -** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -** sqlite3_column_text16() may be invalidated. -** Type conversions and pointer invalidations might occur -** in the following cases: -** -**
    -**
-**   • The initial content is a BLOB and sqlite3_column_text() or
-**     sqlite3_column_text16() is called. A zero-terminator might
-**     need to be added to the string.
-**
-**   • The initial content is UTF-8 text and sqlite3_column_bytes16() or
-**     sqlite3_column_text16() is called. The content must be converted
-**     to UTF-16.
-**
-**   • The initial content is UTF-16 text and sqlite3_column_bytes() or
-**     sqlite3_column_text() is called. The content must be converted
-**     to UTF-8.
-**
-** (An illustrative sketch of such an invalidation follows this list.)
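An editorial sketch (not part of the original header) of the second and third cases above: requesting the other encoding can invalidate a pointer obtained earlier.

static void invalidation_example(sqlite3_stmt *stmt){
  const unsigned char *z8 = sqlite3_column_text(stmt, 0);   /* UTF-8 pointer        */
  const void *z16 = sqlite3_column_text16(stmt, 0);         /* forces a conversion  */
  (void)z16;
  /* z8 may now point to freed or rewritten memory and must not be used again. */
  (void)z8;
}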
-** -** ^Conversions between UTF-16be and UTF-16le are always done in place and do -** not invalidate a prior pointer, though of course the content of the buffer -** that the prior pointer references will have been modified. Other kinds -** of conversion are done in place when it is possible, but sometimes they -** are not possible and in those cases prior pointers are invalidated. -** -** The safest policy is to invoke these routines -** in one of the following ways: -** -**
    -**
-**   • sqlite3_column_text() followed by sqlite3_column_bytes()
-**   • sqlite3_column_blob() followed by sqlite3_column_bytes()
-**   • sqlite3_column_text16() followed by sqlite3_column_bytes16()
-**
-** (An illustrative sketch of the first pairing follows this list.)
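A small editorial sketch of the first pairing above (not part of the original header): force the text representation first, then measure it.

#include <string.h>

static char *dup_text_column(sqlite3_stmt *stmt, int i){
  const unsigned char *z = sqlite3_column_text(stmt, i);  /* convert first        */
  int n = sqlite3_column_bytes(stmt, i);                  /* then take the size   */
  char *copy = 0;
  if( z && (copy = sqlite3_malloc(n+1))!=0 ){
    memcpy(copy, z, n+1);                  /* text results are zero-terminated    */
  }
  return copy;                             /* caller frees with sqlite3_free()    */
}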
-** -** In other words, you should call sqlite3_column_text(), -** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result -** into the desired format, then invoke sqlite3_column_bytes() or -** sqlite3_column_bytes16() to find the size of the result. Do not mix calls -** to sqlite3_column_text() or sqlite3_column_blob() with calls to -** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() -** with calls to sqlite3_column_bytes(). -** -** ^The pointers returned are valid until a type conversion occurs as -** described above, or until [sqlite3_step()] or [sqlite3_reset()] or -** [sqlite3_finalize()] is called. ^The memory space used to hold strings -** and BLOBs is freed automatically. Do not pass the pointers returned -** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into -** [sqlite3_free()]. -** -** ^(If a memory allocation error occurs during the evaluation of any -** of these routines, a default value is returned. The default value -** is either the integer 0, the floating point number 0.0, or a NULL -** pointer. Subsequent calls to [sqlite3_errcode()] will return -** [SQLITE_NOMEM].)^ -*/ -SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); -SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); -SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); -SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); - -/* -** CAPI3REF: Destroy A Prepared Statement Object -** DESTRUCTOR: sqlite3_stmt -** -** ^The sqlite3_finalize() function is called to delete a [prepared statement]. -** ^If the most recent evaluation of the statement encountered no errors -** or if the statement is never been evaluated, then sqlite3_finalize() returns -** SQLITE_OK. ^If the most recent evaluation of statement S failed, then -** sqlite3_finalize(S) returns the appropriate [error code] or -** [extended error code]. -** -** ^The sqlite3_finalize(S) routine can be called at any point during -** the life cycle of [prepared statement] S: -** before statement S is ever evaluated, after -** one or more calls to [sqlite3_reset()], or after any call -** to [sqlite3_step()] regardless of whether or not the statement has -** completed execution. -** -** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. -** -** The application must finalize every [prepared statement] in order to avoid -** resource leaks. It is a grievous error for the application to try to use -** a prepared statement after it has been finalized. Any use of a prepared -** statement after it has been finalized can result in undefined and -** undesirable behavior such as segfaults and heap corruption. -*/ -SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Reset A Prepared Statement Object -** METHOD: sqlite3_stmt -** -** The sqlite3_reset() function is called to reset a [prepared statement] -** object back to its initial state, ready to be re-executed. -** ^Any SQL statement variables that had values bound to them using -** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. 
-** Use [sqlite3_clear_bindings()] to reset the bindings. -** -** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S -** back to the beginning of its program. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S indicated an error, then -** [sqlite3_reset(S)] returns an appropriate [error code]. -** -** ^The [sqlite3_reset(S)] interface does not change the values -** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. -*/ -SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Create Or Redefine SQL Functions -** KEYWORDS: {function creation routines} -** KEYWORDS: {application-defined SQL function} -** KEYWORDS: {application-defined SQL functions} -** METHOD: sqlite3 -** -** ^These functions (collectively known as "function creation routines") -** are used to add SQL functions or aggregates or to redefine the behavior -** of existing SQL functions or aggregates. The only differences between -** these routines are the text encoding expected for -** the second parameter (the name of the function being created) -** and the presence or absence of a destructor callback for -** the application data pointer. -** -** ^The first parameter is the [database connection] to which the SQL -** function is to be added. ^If an application uses more than one database -** connection then application-defined SQL functions must be added -** to each database connection separately. -** -** ^The second parameter is the name of the SQL function to be created or -** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 -** representation, exclusive of the zero-terminator. ^Note that the name -** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. -** ^Any attempt to create a function with a longer name -** will result in [SQLITE_MISUSE] being returned. -** -** ^The third parameter (nArg) -** is the number of arguments that the SQL function or -** aggregate takes. ^If this parameter is -1, then the SQL function or -** aggregate may take any number of arguments between 0 and the limit -** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third -** parameter is less than -1 or greater than 127 then the behavior is -** undefined. -** -** ^The fourth parameter, eTextRep, specifies what -** [SQLITE_UTF8 | text encoding] this SQL function prefers for -** its parameters. The application should set this parameter to -** [SQLITE_UTF16LE] if the function implementation invokes -** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the -** implementation invokes [sqlite3_value_text16be()] on an input, or -** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8] -** otherwise. ^The same SQL function may be registered multiple times using -** different preferred text encodings, with different implementations for -** each encoding. -** ^When multiple implementations of the same function are available, SQLite -** will pick the one that involves the least amount of data conversion. -** -** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC] -** to signal that the function will always return the same result given -** the same inputs within a single SQL statement. Most SQL functions are -** deterministic. 
The built-in [random()] SQL function is an example of a -** function that is not deterministic. The SQLite query planner is able to -** perform additional optimizations on deterministic functions, so use -** of the [SQLITE_DETERMINISTIC] flag is recommended where possible. -** -** ^(The fifth parameter is an arbitrary pointer. The implementation of the -** function can gain access to this pointer using [sqlite3_user_data()].)^ -** -** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are -** pointers to C-language functions that implement the SQL function or -** aggregate. ^A scalar SQL function requires an implementation of the xFunc -** callback only; NULL pointers must be passed as the xStep and xFinal -** parameters. ^An aggregate SQL function requires an implementation of xStep -** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing -** SQL function or aggregate, pass NULL pointers for all three function -** callbacks. -** -** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL, -** then it is destructor for the application data pointer. -** The destructor is invoked when the function is deleted, either by being -** overloaded or when the database connection closes.)^ -** ^The destructor is also invoked if the call to -** sqlite3_create_function_v2() fails. -** ^When the destructor callback of the tenth parameter is invoked, it -** is passed a single argument which is a copy of the application data -** pointer which was the fifth parameter to sqlite3_create_function_v2(). -** -** ^It is permitted to register multiple implementations of the same -** functions with the same name but with either differing numbers of -** arguments or differing preferred text encodings. ^SQLite will use -** the implementation that most closely matches the way in which the -** SQL function is used. ^A function implementation with a non-negative -** nArg parameter is a better match than a function implementation with -** a negative nArg. ^A function where the preferred text encoding -** matches the database encoding is a better -** match than a function where the encoding is different. -** ^A function where the encoding difference is between UTF16le and UTF16be -** is a closer match than a function where the encoding difference is -** between UTF8 and UTF16. -** -** ^Built-in functions may be overloaded by new application-defined functions. -** -** ^An application-defined function is permitted to call other -** SQLite interfaces. However, such calls must not -** close the database connection nor finalize or reset the prepared -** statement in which the function is running. 
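An editorial sketch (not part of the original header) of registering a deterministic scalar function with sqlite3_create_function_v2(); the function name "half" is arbitrary.

static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}

static int register_half(sqlite3 *db){
  return sqlite3_create_function_v2(db, "half", 1,
      SQLITE_UTF8 | SQLITE_DETERMINISTIC,
      0,                 /* pApp: no application data                  */
      halfFunc,          /* xFunc: scalar implementation               */
      0, 0,              /* xStep, xFinal: NULL for a scalar function  */
      0);                /* xDestroy: nothing to free                  */
}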
-*/ -SQLITE_API int sqlite3_create_function( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -SQLITE_API int sqlite3_create_function16( - sqlite3 *db, - const void *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -SQLITE_API int sqlite3_create_function_v2( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*), - void(*xDestroy)(void*) -); - -/* -** CAPI3REF: Text Encodings -** -** These constant define integer codes that represent the various -** text encodings supported by SQLite. -*/ -#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */ -#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */ -#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */ -#define SQLITE_UTF16 4 /* Use native byte order */ -#define SQLITE_ANY 5 /* Deprecated */ -#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ - -/* -** CAPI3REF: Function Flags -** -** These constants may be ORed together with the -** [SQLITE_UTF8 | preferred text encoding] as the fourth argument -** to [sqlite3_create_function()], [sqlite3_create_function16()], or -** [sqlite3_create_function_v2()]. -*/ -#define SQLITE_DETERMINISTIC 0x800 - -/* -** CAPI3REF: Deprecated Functions -** DEPRECATED -** -** These functions are [deprecated]. In order to maintain -** backwards compatibility with older code, these functions continue -** to be supported. However, new applications should avoid -** the use of these functions. To encourage programmers to avoid -** these functions, we will not explain what they do. -*/ -#ifndef SQLITE_OMIT_DEPRECATED -SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void); -SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); -SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int), - void*,sqlite3_int64); -#endif - -/* -** CAPI3REF: Obtaining SQL Values -** METHOD: sqlite3_value -** -** The C-language implementation of SQL functions and aggregates uses -** this set of interface routines to access the parameter values on -** the function or aggregate. -** -** The xFunc (for scalar functions) or xStep (for aggregates) parameters -** to [sqlite3_create_function()] and [sqlite3_create_function16()] -** define callbacks that implement the SQL functions and aggregates. -** The 3rd parameter to these callbacks is an array of pointers to -** [protected sqlite3_value] objects. There is one [sqlite3_value] object for -** each parameter to the SQL function. These routines are used to -** extract values from the [sqlite3_value] objects. -** -** These routines work only with [protected sqlite3_value] objects. -** Any attempt to use these routines on an [unprotected sqlite3_value] -** object results in undefined behavior. 
-** -** ^These routines work just like the corresponding [column access functions] -** except that these routines take a single [protected sqlite3_value] object -** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. -** -** ^The sqlite3_value_text16() interface extracts a UTF-16 string -** in the native byte-order of the host machine. ^The -** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces -** extract UTF-16 strings as big-endian and little-endian respectively. -** -** ^(The sqlite3_value_numeric_type() interface attempts to apply -** numeric affinity to the value. This means that an attempt is -** made to convert the value to an integer or floating point. If -** such a conversion is possible without loss of information (in other -** words, if the value is a string that looks like a number) -** then the conversion is performed. Otherwise no conversion occurs. -** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ -** -** Please pay particular attention to the fact that the pointer returned -** from [sqlite3_value_blob()], [sqlite3_value_text()], or -** [sqlite3_value_text16()] can be invalidated by a subsequent call to -** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], -** or [sqlite3_value_text16()]. -** -** These routines must be called from the same thread as -** the SQL function that supplied the [sqlite3_value*] parameters. -*/ -SQLITE_API const void *sqlite3_value_blob(sqlite3_value*); -SQLITE_API int sqlite3_value_bytes(sqlite3_value*); -SQLITE_API int sqlite3_value_bytes16(sqlite3_value*); -SQLITE_API double sqlite3_value_double(sqlite3_value*); -SQLITE_API int sqlite3_value_int(sqlite3_value*); -SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*); -SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*); -SQLITE_API int sqlite3_value_type(sqlite3_value*); -SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); - -/* -** CAPI3REF: Finding The Subtype Of SQL Values -** METHOD: sqlite3_value -** -** The sqlite3_value_subtype(V) function returns the subtype for -** an [application-defined SQL function] argument V. The subtype -** information can be used to pass a limited amount of context from -** one SQL function to another. Use the [sqlite3_result_subtype()] -** routine to set the subtype for the return value of an SQL function. -** -** SQLite makes no use of subtype itself. It merely passes the subtype -** from the result of one [application-defined SQL function] into the -** input of another. -*/ -SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); - -/* -** CAPI3REF: Copy And Free SQL Values -** METHOD: sqlite3_value -** -** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] -** object V and returns a pointer to that copy. ^The [sqlite3_value] returned -** is a [protected sqlite3_value] object even if the input is not. -** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a -** memory allocation fails. -** -** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object -** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer -** then sqlite3_value_free(V) is a harmless no-op.
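A small sketch of type-aware argument handling with the value accessors above (not from the header; the callback name describeFunc and its behaviour are invented for illustration):

/* Illustrative sketch only. Branches on the fundamental type of the first
** argument before extracting it, and copies text out promptly because the
** returned pointer can be invalidated by later sqlite3_value_* calls. */
#include <sqlite3.h>

static void describeFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  switch( sqlite3_value_type(argv[0]) ){
    case SQLITE_NULL:
      sqlite3_result_null(ctx);
      break;
    case SQLITE_INTEGER:
      sqlite3_result_int64(ctx, sqlite3_value_int64(argv[0]));
      break;
    case SQLITE_FLOAT:
      sqlite3_result_double(ctx, sqlite3_value_double(argv[0]));
      break;
    default: {
      const unsigned char *z = sqlite3_value_text(argv[0]);
      if( z==0 ){
        sqlite3_result_error_nomem(ctx);     /* text conversion failed (OOM) */
        break;
      }
      /* SQLITE_TRANSIENT asks SQLite to take its own private copy. */
      sqlite3_result_text(ctx, (const char*)z, -1, SQLITE_TRANSIENT);
      break;
    }
  }
}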
-*/ -SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*); -SQLITE_API void sqlite3_value_free(sqlite3_value*); - -/* -** CAPI3REF: Obtain Aggregate Function Context -** METHOD: sqlite3_context -** -** Implementations of aggregate SQL functions use this -** routine to allocate memory for storing their state. -** -** ^The first time the sqlite3_aggregate_context(C,N) routine is called -** for a particular aggregate function, SQLite -** allocates N bytes of memory, zeroes out that memory, and returns a pointer -** to the new memory. ^On second and subsequent calls to -** sqlite3_aggregate_context() for the same aggregate function instance, -** the same buffer is returned. Sqlite3_aggregate_context() is normally -** called once for each invocation of the xStep callback and then one -** last time when the xFinal callback is invoked. ^(When no rows match -** an aggregate query, the xStep() callback of the aggregate function -** implementation is never called and xFinal() is called exactly once. -** In those cases, sqlite3_aggregate_context() might be called for the -** first time from within xFinal().)^ -** -** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer -** when first called if N is less than or equal to zero or if a memory -** allocation error occurs. -** -** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. Changing the -** value of N in any subsequent call to sqlite3_aggregate_context() within -** the same aggregate function instance will not resize the memory -** allocation.)^ Within the xFinal callback, it is customary to set -** N=0 in calls to sqlite3_aggregate_context(C,N) so that no -** pointless memory allocations occur. -** -** ^SQLite automatically frees the memory allocated by -** sqlite3_aggregate_context() when the aggregate query concludes. -** -** The first parameter must be a copy of the -** [sqlite3_context | SQL function context] that is the first parameter -** to the xStep or xFinal callback routine that implements the aggregate -** function. -** -** This routine must be called from the same thread in which -** the aggregate SQL function is running. -*/ -SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); - -/* -** CAPI3REF: User Data For Functions -** METHOD: sqlite3_context -** -** ^The sqlite3_user_data() interface returns a copy of -** the pointer that was the pUserData parameter (the 5th parameter) -** of the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines that originally -** registered the application-defined function. -** -** This routine must be called from the same thread in which -** the application-defined function is running. -*/ -SQLITE_API void *sqlite3_user_data(sqlite3_context*); - -/* -** CAPI3REF: Database Connection For Functions -** METHOD: sqlite3_context -** -** ^The sqlite3_context_db_handle() interface returns a copy of -** the pointer to the [database connection] (the 1st parameter) -** of the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines that originally -** registered the application-defined function. -*/ -SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); - -/* -** CAPI3REF: Function Auxiliary Data -** METHOD: sqlite3_context -** -** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values.
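Looping back to sqlite3_aggregate_context() declared above, a sketch of a toy aggregate that keeps its running state in the per-instance buffer (not from the header; the aggregate name "mycount" and the helper register_mycount() are assumptions for illustration):

/* Illustrative sketch only. Counts non-NULL arguments. */
#include <sqlite3.h>

typedef struct CountCtx { sqlite3_int64 n; } CountCtx;

static void countStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  /* First call allocates sizeof(CountCtx) zeroed bytes; later calls for the
  ** same aggregate instance return the same buffer. */
  CountCtx *p = (CountCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
  (void)argc;
  if( p && sqlite3_value_type(argv[0])!=SQLITE_NULL ) p->n++;
}

static void countFinal(sqlite3_context *ctx){
  /* N=0 here: read existing state (if any) without forcing an allocation. */
  CountCtx *p = (CountCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_int64(ctx, p ? p->n : 0);
}

static int register_mycount(sqlite3 *db){
  /* Aggregate: xFunc is NULL, xStep and xFinal are supplied. */
  return sqlite3_create_function_v2(db, "mycount", 1, SQLITE_UTF8,
                                    NULL, NULL, countStep, countFinal, NULL);
}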
If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. -** Then as long as the pattern string remains the same, -** the compiled regular expression can be reused on multiple -** invocations of the same function. -** -** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata -** associated by the sqlite3_set_auxdata() function with the Nth argument -** value to the application-defined function. ^If there is no metadata -** associated with the function argument, this sqlite3_get_auxdata() interface -** returns a NULL pointer. -** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent -** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. -** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, -** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including:
-**   • ^(when the corresponding function parameter changes)^, or
-**   • ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
-**     SQL statement)^, or
-**   • ^(when sqlite3_set_auxdata() is invoked again on the same
-**     parameter)^, or
-**   • ^(during the original sqlite3_set_auxdata() call when a memory
-**     allocation error occurs.)^
-** -** Note the last bullet in particular. The destructor X in -** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the -** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() -** should be called near the end of the function implementation and the -** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. -** -** ^(In practice, metadata is preserved between function calls for -** function parameters that are compile-time constants, including literal -** values and [parameters] and expressions composed from the same.)^ -** -** These routines must be called from the same thread in which -** the SQL function is running. -*/ -SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); -SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); - - -/* -** CAPI3REF: Constants Defining Special Destructor Behavior -** -** These are special values for the destructor that is passed in as the -** final argument to routines like [sqlite3_result_blob()]. ^If the destructor -** argument is SQLITE_STATIC, it means that the content pointer is constant -** and will never change. It does not need to be destroyed. ^The -** SQLITE_TRANSIENT value means that the content will likely change in -** the near future and that SQLite should make its own private copy of -** the content before returning. -** -** The typedef is necessary to work around problems in certain -** C++ compilers. -*/ -typedef void (*sqlite3_destructor_type)(void*); -#define SQLITE_STATIC ((sqlite3_destructor_type)0) -#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) - -/* -** CAPI3REF: Setting The Result Of An SQL Function -** METHOD: sqlite3_context -** -** These routines are used by the xFunc or xFinal callbacks that -** implement SQL functions and aggregates. See -** [sqlite3_create_function()] and [sqlite3_create_function16()] -** for additional information. -** -** These functions work very much like the [parameter binding] family of -** functions used to bind values to host parameters in prepared statements. -** Refer to the [SQL parameter] documentation for additional information. -** -** ^The sqlite3_result_blob() interface sets the result from -** an application-defined function to be the BLOB whose content is pointed -** to by the second parameter and which is N bytes long where N is the -** third parameter. -** -** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N) -** interfaces set the result of the application-defined function to be -** a BLOB containing all zero bytes and N bytes in size. -** -** ^The sqlite3_result_double() interface sets the result from -** an application-defined function to be a floating point value specified -** by its 2nd argument. -** -** ^The sqlite3_result_error() and sqlite3_result_error16() functions -** cause the implemented SQL function to throw an exception. -** ^SQLite uses the string pointed to by the -** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() -** as the text of an error message. ^SQLite interprets the error -** message string from sqlite3_result_error() as UTF-8. ^SQLite -** interprets the string from sqlite3_result_error16() as UTF-16 in native -** byte order. ^If the third parameter to sqlite3_result_error() -** or sqlite3_result_error16() is negative then SQLite takes as the error -** message all text up through the first zero character. 
-** ^If the third parameter to sqlite3_result_error() or -** sqlite3_result_error16() is non-negative then SQLite takes that many -** bytes (not characters) from the 2nd parameter as the error message. -** ^The sqlite3_result_error() and sqlite3_result_error16() -** routines make a private copy of the error message text before -** they return. Hence, the calling function can deallocate or -** modify the text after they return without harm. -** ^The sqlite3_result_error_code() function changes the error code -** returned by SQLite as a result of an error in a function. ^By default, -** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error() -** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. -** -** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an -** error indicating that a string or BLOB is too long to represent. -** -** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an -** error indicating that a memory allocation failed. -** -** ^The sqlite3_result_int() interface sets the return value -** of the application-defined function to be the 32-bit signed integer -** value given in the 2nd argument. -** ^The sqlite3_result_int64() interface sets the return value -** of the application-defined function to be the 64-bit signed integer -** value given in the 2nd argument. -** -** ^The sqlite3_result_null() interface sets the return value -** of the application-defined function to be NULL. -** -** ^The sqlite3_result_text(), sqlite3_result_text16(), -** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces -** set the return value of the application-defined function to be -** a text string which is represented as UTF-8, UTF-16 native byte order, -** UTF-16 little endian, or UTF-16 big endian, respectively. -** ^The sqlite3_result_text64() interface sets the return value of an -** application-defined function to be a text string in an encoding -** specified by the fifth (and last) parameter, which must be one -** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]. -** ^SQLite takes the text result from the application from -** the 2nd parameter of the sqlite3_result_text* interfaces. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is negative, then SQLite takes result text from the 2nd parameter -** through the first zero character. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is non-negative, then as many bytes (not characters) of the text -** pointed to by the 2nd parameter are taken as the application-defined -** function result. If the 3rd parameter is non-negative, then it -** must be the byte offset into the string where the NUL terminator would -** appear if the string were NUL terminated. If any NUL characters occur -** in the string at a byte offset that is less than the value of the 3rd -** parameter, then the resulting string will contain embedded NULs and the -** result of expressions operating on strings with embedded NULs is undefined. -** ^If the 4th parameter to the sqlite3_result_text* interfaces -** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that -** function as the destructor on the text or BLOB result when it has -** finished using that result.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces or to -** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite -** assumes that the text or BLOB result is in constant space and does not -** copy the content of the parameter nor call a destructor on the content -** when it has finished using that result. -** ^If the 4th parameter to the sqlite3_result_text* interfaces -** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT -** then SQLite makes a copy of the result into space obtained -** from [sqlite3_malloc()] before it returns. -** -** ^The sqlite3_result_value() interface sets the result of -** the application-defined function to be a copy of the -** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The -** sqlite3_result_value() interface makes a copy of the [sqlite3_value] -** so that the [sqlite3_value] specified in the parameter may change or -** be deallocated after sqlite3_result_value() returns without harm. -** ^A [protected sqlite3_value] object may always be used where an -** [unprotected sqlite3_value] object is required, so either -** kind of [sqlite3_value] object can be used with this interface. -** -** If these routines are called from within a different thread -** than the one containing the application-defined function that received -** the [sqlite3_context] pointer, the results are undefined. -*/ -SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*, - sqlite3_uint64,void(*)(void*)); -SQLITE_API void sqlite3_result_double(sqlite3_context*, double); -SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int); -SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int); -SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*); -SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*); -SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); -SQLITE_API void sqlite3_result_null(sqlite3_context*); -SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64, - void(*)(void*), unsigned char encoding); -SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*); -SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n); -SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); - - -/* -** CAPI3REF: Setting The Subtype Of An SQL Function -** METHOD: sqlite3_context -** -** The sqlite3_result_subtype(C,T) function causes the subtype of -** the result from the [application-defined SQL function] with -** [sqlite3_context] C to be the value T. Only the lower 8 bits -** of the subtype T are preserved in current versions of SQLite; -** higher order bits are discarded. -** The number of subtype bytes preserved by SQLite might increase -** in future releases of SQLite.
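A sketch of the result-setting interfaces and destructor constants described above (not from the header; the function body and the name greetFunc are invented): SQLITE_STATIC is used for storage that outlives the call, SQLITE_TRANSIENT for a stack buffer SQLite must copy, and sqlite3_result_error() reports a failure.

/* Illustrative sketch only. */
#include <sqlite3.h>
#include <stdio.h>

static void greetFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    /* A string literal lives for the whole program: no copy is needed. */
    sqlite3_result_text(ctx, "hello, anonymous", -1, SQLITE_STATIC);
    return;
  }
  const unsigned char *zName = sqlite3_value_text(argv[0]);
  if( zName==0 ){
    sqlite3_result_error_nomem(ctx);
    return;
  }
  char zBuf[64];
  int n = snprintf(zBuf, sizeof(zBuf), "hello, %s", (const char*)zName);
  if( n<0 || n>=(int)sizeof(zBuf) ){
    /* Negative length => message is read up to its first zero character. */
    sqlite3_result_error(ctx, "greeting too long", -1);
    return;
  }
  /* zBuf goes out of scope when this callback returns, so SQLite must copy. */
  sqlite3_result_text(ctx, zBuf, n, SQLITE_TRANSIENT);
}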
-*/ -SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); - -/* -** CAPI3REF: Define New Collating Sequences -** METHOD: sqlite3 -** -** ^These functions add, remove, or modify a [colla