Update vendor files

2020-10-11 18:17:12 +02:00
parent 50bfdc6166
commit d73119a493
226 changed files with 13364 additions and 15397 deletions

go.mod

@@ -5,6 +5,16 @@ go 1.15
require (
github.com/caarlos0/env/v6 v6.3.0
github.com/gin-gonic/gin v1.6.3
github.com/go-playground/validator/v10 v10.4.0 // indirect
github.com/golang-migrate/migrate/v4 v4.13.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/jackc/pgx/v4 v4.9.0
github.com/json-iterator/go v1.1.10 // indirect
github.com/lib/pq v1.8.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/ugorji/go v1.1.10 // indirect
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 // indirect
gopkg.in/yaml.v2 v2.3.0 // indirect
)

go.sum

@@ -104,6 +104,8 @@ github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD87
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.4.0 h1:72qIR/m8ybvL8L5TIyfgrigqkrw7kVYAvjEvpT85l70=
github.com/go-playground/validator/v10 v10.4.0/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -181,6 +183,8 @@ github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -245,6 +249,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
@@ -266,6 +272,8 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -280,8 +288,12 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
@@ -333,8 +345,12 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.1.10 h1:Mh7W3N/hGJJ8fRQNHIgomNTa0CgZc0aKDFvbgHl+U7A=
github.com/ugorji/go v1.1.10/go.mod h1:/tC+H0R6N4Lcv4DoSdphIa9y/RAs4QFHDtN9W2oQcHw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.1.10 h1:otofY/FAoRTMVqlVeDv/Kpm04D13lfJdrDqPbc3axg4=
github.com/ugorji/go/codec v1.1.10/go.mod h1:jdPQoxvTq1mb8XV6RmofOz5UgNKV2czR6xvxXGwy1Bo=
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
@@ -372,6 +388,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnk
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -485,6 +503,8 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443 h1:X18bCaipMcoJGm27Nv7zr4XYPKGUy92GtqboKC2Hxaw=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 h1:bNEHhJCnrwMKNMmOx3yAynp5vs5/gRy+XWFtZFu7NBM=
golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -647,6 +667,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -6,6 +6,7 @@
# Folders
_obj
_test
bin
# Architecture specific extensions/prefixes
*.[568vq]
@@ -26,4 +27,4 @@ _testmain.go
*.out
*.txt
cover.html
README.html
README.html


@@ -1,6 +1,6 @@
language: go
go:
- 1.13.4
- 1.15.2
- tip
matrix:
allow_failures:
@@ -25,5 +25,5 @@ script:
- go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
after_success: |
[ $TRAVIS_GO_VERSION = 1.13.4 ] &&
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
[ $TRAVIS_GO_VERSION = 1.15.2 ] &&
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN


@@ -1,20 +1,20 @@
Package validator
================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v9/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.1.0-green.svg)
![Project status](https://img.shields.io/badge/version-10.3.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://godoc.org/github.com/go-playground/validator)
[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Package validator implements value validations for structs and individual fields based on tags.
It has the following **unique** features:
- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
- Ability to dive into both map keys and values for validation
- Ability to dive into both map keys and values for validation
- Handles type interface by determining its underlying type prior to validation.
- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs
@@ -64,6 +64,152 @@ Please see https://godoc.org/github.com/go-playground/validator for detailed usa
- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
Baked-in Validations
------
### Fields:
| Tag | Description |
| - | - |
| eqcsfield | Field Equals Another Field (relative)|
| eqfield | Field Equals Another Field |
| fieldcontains | NOT DOCUMENTED IN doc.go |
| fieldexcludes | NOT DOCUMENTED IN doc.go |
| gtcsfield | Field Greater Than Another Relative Field |
| gtecsfield | Field Greater Than or Equal To Another Relative Field |
| gtefield | Field Greater Than or Equal To Another Field |
| gtfield | Field Greater Than Another Field |
| ltcsfield | Less Than Another Relative Field |
| ltecsfield | Less Than or Equal To Another Relative Field |
| ltefield | Less Than or Equal To Another Field |
| ltfield | Less Than Another Field |
| necsfield | Field Does Not Equal Another Field (relative) |
| nefield | Field Does Not Equal Another Field |
### Network:
| Tag | Description |
| - | - |
| cidr | Classless Inter-Domain Routing CIDR |
| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
| datauri | Data URL |
| fqdn | Fully Qualified Domain Name (FQDN) |
| hostname | Hostname RFC 952 |
| hostname_port | HostPort |
| hostname_rfc1123 | Hostname RFC 1123 |
| ip | Internet Protocol Address IP |
| ip4_addr | Internet Protocol Address IPv4 |
| ip6_addr | Internet Protocol Address IPv6 |
| ip_addr | Internet Protocol Address IP |
| ipv4 | Internet Protocol Address IPv4 |
| ipv6 | Internet Protocol Address IPv6 |
| mac | Media Access Control Address MAC |
| tcp4_addr | Transmission Control Protocol Address TCPv4 |
| tcp6_addr | Transmission Control Protocol Address TCPv6 |
| tcp_addr | Transmission Control Protocol Address TCP |
| udp4_addr | User Datagram Protocol Address UDPv4 |
| udp6_addr | User Datagram Protocol Address UDPv6 |
| udp_addr | User Datagram Protocol Address UDP |
| unix_addr | Unix domain socket end point Address |
| uri | URI String |
| url | URL String |
| url_encoded | URL Encoded |
| urn_rfc2141 | URN RFC 2141 String |
### Strings:
| Tag | Description |
| - | - |
| alpha | Alpha Only |
| alphanum | Alphanumeric |
| alphanumunicode | Alphanumeric Unicode |
| alphaunicode | Alpha Unicode |
| ascii | ASCII |
| contains | Contains |
| containsany | Contains Any |
| containsrune | Contains Rune |
| endswith | Ends With |
| lowercase | Lowercase |
| multibyte | Multi-Byte Characters |
| number | NOT DOCUMENTED IN doc.go |
| numeric | Numeric |
| printascii | Printable ASCII |
| startswith | Starts With |
| uppercase | Uppercase |
### Format:
| Tag | Description |
| - | - |
| base64 | Base64 String |
| base64url | Base64URL String |
| btc_addr | Bitcoin Address |
| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
| datetime | Datetime |
| e164 | e164 formatted phone number |
| email | E-mail String |
| eth_addr | Ethereum Address |
| hexadecimal | Hexadecimal String |
| hexcolor | Hexcolor String |
| hsl | HSL String |
| hsla | HSLA String |
| html | HTML Tags |
| html_encoded | HTML Encoded |
| isbn | International Standard Book Number |
| isbn10 | International Standard Book Number 10 |
| isbn13 | International Standard Book Number 13 |
| json | JSON |
| latitude | Latitude |
| longitude | Longitude |
| rgb | RGB String |
| rgba | RGBA String |
| ssn | Social Security Number SSN |
| uuid | Universally Unique Identifier UUID |
| uuid3 | Universally Unique Identifier UUID v3 |
| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
| uuid4 | Universally Unique Identifier UUID v4 |
| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
| uuid5 | Universally Unique Identifier UUID v5 |
| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
### Comparisons:
| Tag | Description |
| - | - |
| eq | Equals |
| gt | Greater Than |
| gte | Greater Than or Equal |
| lt | Less Than |
| lte | Less Than or Equal |
| ne | Not Equal |
### Other:
| Tag | Description |
| - | - |
| dir | Directory |
| endswith | Ends With |
| excludes | Excludes |
| excludesall | Excludes All |
| excludesrune | Excludes Rune |
| file | File path |
| isdefault | Is Default |
| len | Length |
| max | Maximum |
| min | Minimum |
| oneof | One Of |
| required | Required |
| required_if | Required If |
| required_unless | Required Unless |
| required_with | Required With |
| required_with_all | Required With All |
| required_without | Required Without |
| required_without_all | Required Without All |
| excluded_with | Excluded With |
| excluded_with_all | Excluded With All |
| excluded_without | Excluded Without |
| excluded_without_all | Excluded Without All |
| unique | Unique |
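The tables above are reference lists; as a minimal sketch of how a few of the format tags are applied in practice (the struct, field names and values below are illustrative, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Endpoint is a hypothetical struct using several tags from the tables above.
type Endpoint struct {
	Host   string `validate:"fqdn"`          // e.g. "api.example.com"
	Listen string `validate:"hostname_port"` // e.g. "0.0.0.0:8080"
	Phone  string `validate:"e164"`          // e.g. "+31612345678"
	Token  string `validate:"uuid4"`
}

func main() {
	v := validator.New()
	e := Endpoint{
		Host:   "api.example.com",
		Listen: "0.0.0.0:8080",
		Phone:  "+31612345678",
		Token:  "8c7b4f61-2a3d-4b1e-9f6a-0d2c5e7a9b11",
	}
	// Struct returns nil when every field satisfies its tag.
	fmt.Println(v.Struct(e))
}
```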
Benchmarks
------
###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64


@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"net"
@@ -16,6 +17,8 @@ import (
"time"
"unicode/utf8"
"golang.org/x/crypto/sha3"
urn "github.com/leodido/go-urn"
)
@@ -56,7 +59,8 @@ var (
// defines a common or complex set of validation(s) to simplify
// adding validation to structs.
bakedInAliases = map[string]string{
"iscolor": "hexcolor|rgb|rgba|hsl|hsla",
"iscolor": "hexcolor|rgb|rgba|hsl|hsla",
"country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
}
// BakedInValidators is the default map of ValidationFunc
@@ -64,10 +68,16 @@ var (
// or even disregard and use your own map if so desired.
bakedInValidators = map[string]Func{
"required": hasValue,
"required_if": requiredIf,
"required_unless": requiredUnless,
"required_with": requiredWith,
"required_with_all": requiredWithAll,
"required_without": requiredWithout,
"required_without_all": requiredWithoutAll,
"excluded_with": excludedWith,
"excluded_with_all": excludedWithAll,
"excluded_without": excludedWithout,
"excluded_without_all": excludedWithoutAll,
"isdefault": isDefault,
"len": hasLengthOf,
"min": hasMinOf,
@@ -120,6 +130,8 @@ var (
"excludesrune": excludesRune,
"startswith": startsWith,
"endswith": endsWith,
"startsnotwith": startsNotWith,
"endsnotwith": endsNotWith,
"isbn": isISBN,
"isbn10": isISBN10,
"isbn13": isISBN13,
@@ -172,6 +184,10 @@ var (
"lowercase": isLowercase,
"uppercase": isUppercase,
"datetime": isDatetime,
"timezone": isTimeZone,
"iso3166_1_alpha2": isIso3166Alpha2,
"iso3166_1_alpha3": isIso3166Alpha3,
"iso3166_1_alpha_numeric": isIso3166AlphaNumeric,
}
)
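The hunks above register the new iso3166_1_* validators and, further up, the country_code alias. A hedged usage sketch (example values only, not taken from the diff):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()
	// country_code expands to "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric".
	fmt.Println(v.Var("FR", "country_code"))           // <nil>: valid alpha-2 code
	fmt.Println(v.Var("FRA", "country_code"))          // <nil>: valid alpha-3 code
	fmt.Println(v.Var(250, "iso3166_1_alpha_numeric")) // <nil>: 250 is in the numeric table
	fmt.Println(v.Var("ZZ", "iso3166_1_alpha2"))       // validation error: not a known alpha-2 code
}
```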
@@ -239,23 +255,33 @@ func isUnique(fl FieldLevel) bool {
switch field.Kind() {
case reflect.Slice, reflect.Array:
elem := field.Type().Elem()
if elem.Kind() == reflect.Ptr {
elem = elem.Elem()
}
if param == "" {
m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))
for i := 0; i < field.Len(); i++ {
m.SetMapIndex(field.Index(i), v)
m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
}
return field.Len() == m.Len()
}
sf, ok := field.Type().Elem().FieldByName(param)
sf, ok := elem.FieldByName(param)
if !ok {
panic(fmt.Sprintf("Bad field name %s", param))
}
m := reflect.MakeMap(reflect.MapOf(sf.Type, v.Type()))
sfTyp := sf.Type
if sfTyp.Kind() == reflect.Ptr {
sfTyp = sfTyp.Elem()
}
m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
for i := 0; i < field.Len(); i++ {
m.SetMapIndex(field.Index(i).FieldByName(param), v)
m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v)
}
return field.Len() == m.Len()
case reflect.Map:
@@ -513,7 +539,7 @@ func isISBN10(fl FieldLevel) bool {
return checksum%11 == 0
}
// IsEthereumAddress is the validation function for validating if the field's value is a valid ethereum address based currently only on the format
// IsEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
func isEthereumAddress(fl FieldLevel) bool {
address := fl.Field().String()
@@ -525,7 +551,21 @@ func isEthereumAddress(fl FieldLevel) bool {
return true
}
// checksum validation is blocked by https://github.com/golang/crypto/pull/28
// Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
address = address[2:] // Skip "0x" prefix.
h := sha3.NewLegacyKeccak256()
// hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
_, _ = h.Write([]byte(strings.ToLower(address)))
hash := hex.EncodeToString(h.Sum(nil))
for i := 0; i < len(address); i++ {
if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
continue
}
if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
return false
}
}
return true
}
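The loop above implements the EIP-55 mixed-case check; a standalone sketch of the same logic, kept outside the validator for readability (it assumes golang.org/x/crypto/sha3, which this commit vendors, and that the 0x + 40-hex-character format was already verified):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	"golang.org/x/crypto/sha3"
)

// hasEIP55Checksum reports whether a 0x-prefixed address uses valid EIP-55 mixed-case encoding.
func hasEIP55Checksum(address string) bool {
	address = address[2:] // strip the "0x" prefix
	h := sha3.NewLegacyKeccak256()
	_, _ = h.Write([]byte(strings.ToLower(address)))
	hash := hex.EncodeToString(h.Sum(nil))

	for i := 0; i < len(address); i++ {
		if address[i] <= '9' { // digits carry no case information
			continue
		}
		// A hash nibble >= 8 demands an upper-case letter, < 8 a lower-case one.
		if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(hasEIP55Checksum("0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed")) // true: example address from the EIP-55 spec
	fmt.Println(hasEIP55Checksum("0x5AAeb6053F3E94C9b9A09f33669435E7Ef1BeAed")) // false: one letter's case was flipped
}
```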
@@ -690,6 +730,16 @@ func endsWith(fl FieldLevel) bool {
return strings.HasSuffix(fl.Field().String(), fl.Param())
}
// StartsNotWith is the validation function for validating that the field's value does not start with the text specified within the param.
func startsNotWith(fl FieldLevel) bool {
return !startsWith(fl)
}
// EndsNotWith is the validation function for validating that the field's value does not end with the text specified within the param.
func endsNotWith(fl FieldLevel) bool {
return !endsWith(fl)
}
// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value.
func fieldContains(fl FieldLevel) bool {
field := fl.Field()
@@ -1117,7 +1167,7 @@ func isEq(fl FieldLevel) bool {
return int64(field.Len()) == p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p := asInt(param)
p := asIntFromType(field.Type(), param)
return field.Int() == p
@@ -1371,6 +1421,75 @@ func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue boo
}
}
// requireCheckFieldValue is a func for check field value
func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool {
field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
if !found {
return defaultNotFoundValue
}
switch kind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return field.Int() == asInt(value)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return field.Uint() == asUint(value)
case reflect.Float32, reflect.Float64:
return field.Float() == asFloat(value)
case reflect.Slice, reflect.Map, reflect.Array:
return int64(field.Len()) == asInt(value)
}
// default reflect.String:
return field.String() == value
}
// requiredIf is the validation function
// The field under validation must be present and not empty only if all the other specified fields are equal to the value following the specified field.
func requiredIf(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
if len(params)%2 != 0 {
panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName()))
}
for i := 0; i < len(params); i += 2 {
if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
return true
}
}
return hasValue(fl)
}
// requiredUnless is the validation function
// The field under validation must be present and not empty unless all the other specified fields are equal to the value following the specified field.
func requiredUnless(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
if len(params)%2 != 0 {
panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName()))
}
for i := 0; i < len(params); i += 2 {
if requireCheckFieldValue(fl, params[i], params[i+1], false) {
return true
}
}
return hasValue(fl)
}
// ExcludedWith is the validation function
// The field under validation must not be present or is empty if any of the other specified fields are present.
func excludedWith(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
for _, param := range params {
if !requireCheckFieldKind(fl, param, true) {
return !hasValue(fl)
}
}
return true
}
// RequiredWith is the validation function
// The field under validation must be present and not empty only if any of the other specified fields are present.
func requiredWith(fl FieldLevel) bool {
@@ -1383,6 +1502,18 @@ func requiredWith(fl FieldLevel) bool {
return true
}
// ExcludedWithAll is the validation function
// The field under validation must not be present or is empty if all of the other specified fields are present.
func excludedWithAll(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
for _, param := range params {
if requireCheckFieldKind(fl, param, true) {
return true
}
}
return !hasValue(fl)
}
// RequiredWithAll is the validation function
// The field under validation must be present and not empty only if all of the other specified fields are present.
func requiredWithAll(fl FieldLevel) bool {
@@ -1395,6 +1526,15 @@ func requiredWithAll(fl FieldLevel) bool {
return hasValue(fl)
}
// ExcludedWithout is the validation function
// The field under validation must not be present or is empty when any of the other specified fields are not present.
func excludedWithout(fl FieldLevel) bool {
if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
return !hasValue(fl)
}
return true
}
// RequiredWithout is the validation function
// The field under validation must be present and not empty only when any of the other specified fields are not present.
func requiredWithout(fl FieldLevel) bool {
@@ -1404,6 +1544,18 @@ func requiredWithout(fl FieldLevel) bool {
return true
}
// ExcludedWithoutAll is the validation function
// The field under validation must not be present or is empty when all of the other specified fields are not present.
func excludedWithoutAll(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
for _, param := range params {
if !requireCheckFieldKind(fl, param, true) {
return true
}
}
return !hasValue(fl)
}
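The excluded_* validators added above mirror the required_* family with inverted semantics; a minimal hedged sketch (struct and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Payment is a hypothetical struct: IBAN must stay empty whenever CardNumber is set.
type Payment struct {
	CardNumber string
	IBAN       string `validate:"excluded_with=CardNumber"`
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Payment{CardNumber: "4111111111111111"}))                           // <nil>
	fmt.Println(v.Struct(Payment{CardNumber: "4111111111111111", IBAN: "DE89370400440532"})) // error on IBAN
}
```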
// RequiredWithoutAll is the validation function
// The field under validation must be present and not empty only when all of the other specified fields are not present.
func requiredWithoutAll(fl FieldLevel) bool {
@@ -1529,7 +1681,7 @@ func isGte(fl FieldLevel) bool {
return int64(field.Len()) >= p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p := asInt(param)
p := asIntFromType(field.Type(), param)
return field.Int() >= p
@@ -1576,7 +1728,7 @@ func isGt(fl FieldLevel) bool {
return int64(field.Len()) > p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p := asInt(param)
p := asIntFromType(field.Type(), param)
return field.Int() > p
@@ -1619,7 +1771,7 @@ func hasLengthOf(fl FieldLevel) bool {
return int64(field.Len()) == p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p := asInt(param)
p := asIntFromType(field.Type(), param)
return field.Int() == p
@@ -1755,7 +1907,7 @@ func isLte(fl FieldLevel) bool {
return int64(field.Len()) <= p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p := asInt(param)
p := asIntFromType(field.Type(), param)
return field.Int() <= p
@@ -1802,7 +1954,7 @@ func isLt(fl FieldLevel) bool {
return int64(field.Len()) < p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p := asInt(param)
p := asIntFromType(field.Type(), param)
return field.Int() < p
@@ -1990,12 +2142,7 @@ func isFQDN(fl FieldLevel) bool {
return false
}
if val[len(val)-1] == '.' {
val = val[0 : len(val)-1]
}
return strings.ContainsAny(val, ".") &&
hostnameRegexRFC952.MatchString(val)
return fqdnRegexRFC1123.MatchString(val)
}
// IsDir is the validation function for validating if the current field's value is a valid directory.
@@ -2080,6 +2227,29 @@ func isDatetime(fl FieldLevel) bool {
if field.Kind() == reflect.String {
_, err := time.Parse(param, field.String())
return err == nil
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
func isTimeZone(fl FieldLevel) bool {
field := fl.Field()
if field.Kind() == reflect.String {
// empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name
if field.String() == "" {
return false
}
// Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name
if strings.ToLower(field.String()) == "local" {
return false
}
_, err := time.LoadLocation(field.String())
if err != nil {
return false
}
@@ -2089,3 +2259,31 @@ func isDatetime(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code.
func isIso3166Alpha2(fl FieldLevel) bool {
val := fl.Field().String()
return iso3166_1_alpha2[val]
}
// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
func isIso3166Alpha3(fl FieldLevel) bool {
val := fl.Field().String()
return iso3166_1_alpha3[val]
}
// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
func isIso3166AlphaNumeric(fl FieldLevel) bool {
field := fl.Field()
var code int
switch field.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
code = int(field.Int() % 1000)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
code = int(field.Uint() % 1000)
default:
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
return iso3166_1_alpha_numeric[code]
}


@@ -0,0 +1,162 @@
package validator
var iso3166_1_alpha2 = map[string]bool{
// see: https://www.iso.org/iso-3166-country-codes.html
"AF": true, "AX": true, "AL": true, "DZ": true, "AS": true,
"AD": true, "AO": true, "AI": true, "AQ": true, "AG": true,
"AR": true, "AM": true, "AW": true, "AU": true, "AT": true,
"AZ": true, "BS": true, "BH": true, "BD": true, "BB": true,
"BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true,
"BT": true, "BO": true, "BQ": true, "BA": true, "BW": true,
"BV": true, "BR": true, "IO": true, "BN": true, "BG": true,
"BF": true, "BI": true, "KH": true, "CM": true, "CA": true,
"CV": true, "KY": true, "CF": true, "TD": true, "CL": true,
"CN": true, "CX": true, "CC": true, "CO": true, "KM": true,
"CG": true, "CD": true, "CK": true, "CR": true, "CI": true,
"HR": true, "CU": true, "CW": true, "CY": true, "CZ": true,
"DK": true, "DJ": true, "DM": true, "DO": true, "EC": true,
"EG": true, "SV": true, "GQ": true, "ER": true, "EE": true,
"ET": true, "FK": true, "FO": true, "FJ": true, "FI": true,
"FR": true, "GF": true, "PF": true, "TF": true, "GA": true,
"GM": true, "GE": true, "DE": true, "GH": true, "GI": true,
"GR": true, "GL": true, "GD": true, "GP": true, "GU": true,
"GT": true, "GG": true, "GN": true, "GW": true, "GY": true,
"HT": true, "HM": true, "VA": true, "HN": true, "HK": true,
"HU": true, "IS": true, "IN": true, "ID": true, "IR": true,
"IQ": true, "IE": true, "IM": true, "IL": true, "IT": true,
"JM": true, "JP": true, "JE": true, "JO": true, "KZ": true,
"KE": true, "KI": true, "KP": true, "KR": true, "KW": true,
"KG": true, "LA": true, "LV": true, "LB": true, "LS": true,
"LR": true, "LY": true, "LI": true, "LT": true, "LU": true,
"MO": true, "MK": true, "MG": true, "MW": true, "MY": true,
"MV": true, "ML": true, "MT": true, "MH": true, "MQ": true,
"MR": true, "MU": true, "YT": true, "MX": true, "FM": true,
"MD": true, "MC": true, "MN": true, "ME": true, "MS": true,
"MA": true, "MZ": true, "MM": true, "NA": true, "NR": true,
"NP": true, "NL": true, "NC": true, "NZ": true, "NI": true,
"NE": true, "NG": true, "NU": true, "NF": true, "MP": true,
"NO": true, "OM": true, "PK": true, "PW": true, "PS": true,
"PA": true, "PG": true, "PY": true, "PE": true, "PH": true,
"PN": true, "PL": true, "PT": true, "PR": true, "QA": true,
"RE": true, "RO": true, "RU": true, "RW": true, "BL": true,
"SH": true, "KN": true, "LC": true, "MF": true, "PM": true,
"VC": true, "WS": true, "SM": true, "ST": true, "SA": true,
"SN": true, "RS": true, "SC": true, "SL": true, "SG": true,
"SX": true, "SK": true, "SI": true, "SB": true, "SO": true,
"ZA": true, "GS": true, "SS": true, "ES": true, "LK": true,
"SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true,
"CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true,
"TH": true, "TL": true, "TG": true, "TK": true, "TO": true,
"TT": true, "TN": true, "TR": true, "TM": true, "TC": true,
"TV": true, "UG": true, "UA": true, "AE": true, "GB": true,
"US": true, "UM": true, "UY": true, "UZ": true, "VU": true,
"VE": true, "VN": true, "VG": true, "VI": true, "WF": true,
"EH": true, "YE": true, "ZM": true, "ZW": true,
}
var iso3166_1_alpha3 = map[string]bool{
// see: https://www.iso.org/iso-3166-country-codes.html
"AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true,
"AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true,
"ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true,
"BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true,
"BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true,
"BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true,
"BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true,
"BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true,
"CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true,
"CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true,
"COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true,
"CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true,
"DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true,
"SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true,
"ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true,
"FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true,
"GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true,
"GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true,
"GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true,
"HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true,
"HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true,
"IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true,
"JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true,
"KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true,
"KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true,
"LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true,
"MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true,
"MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true,
"MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true,
"MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true,
"MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true,
"NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true,
"NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true,
"NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true,
"PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true,
"PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true,
"ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true,
"SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true,
"VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true,
"SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true,
"SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true,
"ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true,
"SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true,
"SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true,
"TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true,
"TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true,
"UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true,
"USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true,
"VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true,
"YEM": true, "ZMB": true, "ZWE": true, "ALA": true,
}
var iso3166_1_alpha_numeric = map[int]bool{
// see: https://www.iso.org/iso-3166-country-codes.html
4: true, 8: true, 12: true, 16: true, 20: true,
24: true, 660: true, 10: true, 28: true, 32: true,
51: true, 533: true, 36: true, 40: true, 31: true,
44: true, 48: true, 50: true, 52: true, 112: true,
56: true, 84: true, 204: true, 60: true, 64: true,
68: true, 535: true, 70: true, 72: true, 74: true,
76: true, 86: true, 96: true, 100: true, 854: true,
108: true, 132: true, 116: true, 120: true, 124: true,
136: true, 140: true, 148: true, 152: true, 156: true,
162: true, 166: true, 170: true, 174: true, 180: true,
178: true, 184: true, 188: true, 191: true, 192: true,
531: true, 196: true, 203: true, 384: true, 208: true,
262: true, 212: true, 214: true, 218: true, 818: true,
222: true, 226: true, 232: true, 233: true, 748: true,
231: true, 238: true, 234: true, 242: true, 246: true,
250: true, 254: true, 258: true, 260: true, 266: true,
270: true, 268: true, 276: true, 288: true, 292: true,
300: true, 304: true, 308: true, 312: true, 316: true,
320: true, 831: true, 324: true, 624: true, 328: true,
332: true, 334: true, 336: true, 340: true, 344: true,
348: true, 352: true, 356: true, 360: true, 364: true,
368: true, 372: true, 833: true, 376: true, 380: true,
388: true, 392: true, 832: true, 400: true, 398: true,
404: true, 296: true, 408: true, 410: true, 414: true,
417: true, 418: true, 428: true, 422: true, 426: true,
430: true, 434: true, 438: true, 440: true, 442: true,
446: true, 450: true, 454: true, 458: true, 462: true,
466: true, 470: true, 584: true, 474: true, 478: true,
480: true, 175: true, 484: true, 583: true, 498: true,
492: true, 496: true, 499: true, 500: true, 504: true,
508: true, 104: true, 516: true, 520: true, 524: true,
528: true, 540: true, 554: true, 558: true, 562: true,
566: true, 570: true, 574: true, 807: true, 580: true,
578: true, 512: true, 586: true, 585: true, 275: true,
591: true, 598: true, 600: true, 604: true, 608: true,
612: true, 616: true, 620: true, 630: true, 634: true,
642: true, 643: true, 646: true, 638: true, 652: true,
654: true, 659: true, 662: true, 663: true, 666: true,
670: true, 882: true, 674: true, 678: true, 682: true,
686: true, 688: true, 690: true, 694: true, 702: true,
534: true, 703: true, 705: true, 90: true, 706: true,
710: true, 239: true, 728: true, 724: true, 144: true,
729: true, 740: true, 744: true, 752: true, 756: true,
760: true, 158: true, 762: true, 834: true, 764: true,
626: true, 768: true, 772: true, 776: true, 780: true,
788: true, 792: true, 795: true, 796: true, 798: true,
800: true, 804: true, 784: true, 826: true, 581: true,
840: true, 858: true, 860: true, 548: true, 862: true,
704: true, 92: true, 850: true, 876: true, 732: true,
887: true, 894: true, 716: true, 248: true,
}


@@ -245,6 +245,40 @@ ensures the value is not nil.
Usage: required
Required If
The field under validation must be present and not empty only if all
the other specified fields are equal to the value following the specified
field. For strings ensures value is not "". For slices, maps, pointers,
interfaces, channels and functions ensures the value is not nil.
Usage: required_if
Examples:
// require the field if the Field1 is equal to the parameter given:
Usage: required_if=Field1 foobar
// require the field if the Field1 and Field2 are equal to the values respectively:
Usage: required_if=Field1 foo Field2 bar
Required Unless
The field under validation must be present and not empty unless all
the other specified fields are equal to the value following the specified
field. For strings ensures value is not "". For slices, maps, pointers,
interfaces, channels and functions ensures the value is not nil.
Usage: required_unless
Examples:
// require the field unless the Field1 is equal to the parameter given:
Usage: required_unless=Field1 foobar
// require the field unless the Field1 and Field2 are equal to the values respectively:
Usage: required_unless=Field1 foo Field2 bar
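A hedged struct-level example of the two tags documented above (names and values are illustrative, not part of doc.go):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Order struct {
	Shipping string `validate:"oneof=pickup courier"`
	// Address is required only when Shipping == "courier".
	Address string `validate:"required_if=Shipping courier"`
	// Coupon is required unless Channel == "internal".
	Channel string
	Coupon  string `validate:"required_unless=Channel internal"`
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Order{Shipping: "pickup", Channel: "internal"}))  // <nil>
	fmt.Println(v.Struct(Order{Shipping: "courier", Channel: "internal"})) // error: Address fails required_if
}
```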
Required With
The field under validation must be present and not empty only if any
@@ -321,8 +355,17 @@ equal to the parameter given. For strings, it checks that
the string length is exactly that number of characters. For slices,
arrays, and maps, validates the number of items.
Example #1
Usage: len=10
Example #2 (time.Duration)
For time.Duration, len will ensure that the value is equal to the duration given
in the parameter.
Usage: len=1h30m
Maximum
For numbers, max will ensure that the value is
@@ -330,8 +373,17 @@ less than or equal to the parameter given. For strings, it checks
that the string length is at most that number of characters. For
slices, arrays, and maps, validates the number of items.
Example #1
Usage: max=10
Example #2 (time.Duration)
For time.Duration, max will ensure that the value is less than or equal to the
duration given in the parameter.
Usage: max=1h30m
Minimum
For numbers, min will ensure that the value is
@@ -339,24 +391,51 @@ greater or equal to the parameter given. For strings, it checks that
the string length is at least that number of characters. For slices,
arrays, and maps, validates the number of items.
Example #1
Usage: min=10
Example #2 (time.Duration)
For time.Duration, min will ensure that the value is greater than or equal to
the duration given in the parameter.
Usage: min=1h30m
Equals
For strings & numbers, eq will ensure that the value is
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
Example #1
Usage: eq=10
Example #2 (time.Duration)
For time.Duration, eq will ensure that the value is equal to the duration given
in the parameter.
Usage: eq=1h30m
Not Equal
For strings & numbers, ne will ensure that the value is not
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
Example #1
Usage: ne=10
Example #2 (time.Duration)
For time.Duration, ne will ensure that the value is not equal to the duration
given in the parameter.
Usage: ne=1h30m
One Of
For strings, ints, and uints, oneof will ensure that the value
@@ -386,11 +465,17 @@ For time.Time ensures the time value is greater than time.Now.UTC().
Usage: gt
Example #3 (time.Duration)
For time.Duration, gt will ensure that the value is greater than the duration
given in the parameter.
Usage: gt=1h30m
Greater Than or Equal
Same as 'min' above. Kept both to make terminology with 'len' easier.
Example #1
Usage: gte=10
@@ -401,6 +486,13 @@ For time.Time ensures the time value is greater than or equal to time.Now.UTC().
Usage: gte
Example #3 (time.Duration)
For time.Duration, gte will ensure that the value is greater than or equal to
the duration given in the parameter.
Usage: gte=1h30m
Less Than
For numbers, this will ensure that the value is less than the parameter given.
@@ -412,10 +504,18 @@ Example #1
Usage: lt=10
Example #2 (time.Time)
For time.Time ensures the time value is less than time.Now.UTC().
Usage: lt
Example #3 (time.Duration)
For time.Duration, lt will ensure that the value is less than the duration given
in the parameter.
Usage: lt=1h30m
Less Than or Equal
Same as 'max' above. Kept both to make terminology with 'len' easier.
@@ -430,6 +530,13 @@ For time.Time ensures the time value is less than or equal to time.Now.UTC().
Usage: lte
Example #3 (time.Duration)
For time.Duration, lte will ensure that the value is less than or equal to the
duration given in the parameter.
Usage: lte=1h30m
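A short hedged sketch of the time.Duration support described in the examples above (the field name is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-playground/validator/v10"
)

type Job struct {
	// Duration parameters such as "1s" and "1h30m" are parsed with time.ParseDuration.
	Timeout time.Duration `validate:"gte=1s,lte=1h30m"`
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Job{Timeout: 30 * time.Minute})) // <nil>
	fmt.Println(v.Struct(Job{Timeout: 2 * time.Hour}))    // error: lte=1h30m fails
}
```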
Field Equals Another Field
This will validate the field value against another fields value either within
@@ -476,9 +583,9 @@ relative to the top level struct.
Field Greater Than Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Only valid for Numbers, time.Duration and time.Time types, this will validate
the field value against another field's value either within a struct or passed in
field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -490,7 +597,6 @@ Example #2:
// Validating by field:
validate.VarWithValue(start, end, "gtfield")
Field Greater Than Another Relative Field
This does the same as gtfield except that it validates the field provided
@@ -500,9 +606,9 @@ relative to the top level struct.
Field Greater Than or Equal To Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Only valid for Numbers, time.Duration and time.Time types, this will validate
the field value against another field's value either within a struct or passed in
field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -523,9 +629,9 @@ to the top level struct.
Less Than Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Only valid for Numbers, time.Duration and time.Time types, this will validate
the field value against another field's value either within a struct or passed in
field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -546,9 +652,9 @@ to the top level struct.
Less Than or Equal To Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Only valid for Numbers, time.Duration and time.Time types, this will validate
the field value against another field's value either within a struct or passed in
field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -620,6 +726,13 @@ This validates that a string value contains unicode alphanumeric characters only
Usage: alphanumunicode
Number
This validates that a string value contains number values only.
For integers or float it returns true.
Usage: number
Numeric
This validates that a string value contains a basic numeric value.
@@ -677,6 +790,13 @@ This validates that a string value contains a valid hsla color
Usage: hsla
E.164 Phone Number String
This validates that a string value contains a valid E.164 Phone number
https://en.wikipedia.org/wiki/E.164 (ex. +1123456789)
Usage: e164
E-mail String
This validates that a string value contains a valid email
@@ -759,8 +879,7 @@ Special thanks to Pieter Wuille for providing reference implementations.
Ethereum Address
This validates that a string value contains a valid ethereum address.
The format of the string is checked to ensure it matches the standard Ethereum address format
Full validation is blocked by https://github.com/golang/crypto/pull/28
The format of the string is checked to ensure it matches the standard Ethereum address format.
Usage: eth_addr
@@ -814,6 +933,18 @@ This validates that a string value ends with the supplied string value
Usage: endswith=goodbye
Does Not Start With
This validates that a string value does not start with the supplied string value
Usage: startsnotwith=hello
Does Not End With
This validates that a string value does not end with the supplied string value
Usage: endsnotwith=goodbye
International Standard Book Number
This validates that a string value contains a valid isbn10 or isbn13 value.
@@ -1069,6 +1200,36 @@ Supplied format must match the official Go time format layout as documented in h
Usage: datetime=2006-01-02
Iso3166-1 alpha-2
This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard.
see: https://www.iso.org/iso-3166-country-codes.html
Usage: iso3166_1_alpha2
Iso3166-1 alpha-3
This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard.
see: https://www.iso.org/iso-3166-country-codes.html
Usage: iso3166_1_alpha3
Iso3166-1 alpha-numeric
This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard.
see: https://www.iso.org/iso-3166-country-codes.html
Usage: iso3166_1_alpha_numeric
TimeZone
This validates that a string value is a valid time zone based on the time zone database present on the system.
Although an empty value and the value "Local" are accepted by the Go time.LoadLocation function, they are not allowed by this validator.
More information on https://golang.org/pkg/time/#LoadLocation
Usage: timezone
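A hedged example of the timezone validator described above (field name and values are illustrative; the result depends on the tz database installed on the host):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Profile struct {
	TimeZone string `validate:"timezone"` // e.g. "Europe/Amsterdam"
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Profile{TimeZone: "Europe/Amsterdam"})) // <nil> on hosts with tz data
	fmt.Println(v.Struct(Profile{TimeZone: "Local"}))            // error: "Local" is rejected explicitly
	fmt.Println(v.Struct(Profile{TimeZone: ""}))                 // error: the empty string is rejected as well
}
```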
Alias Validators and Tags
NOTE: When returning an error, the tag returned in "FieldError" will be
@@ -1080,6 +1241,8 @@ Here is a list of the current built in alias tags:
"iscolor"
alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
"country_code"
alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code)
Validator notes:


@@ -99,7 +99,7 @@ type FieldError interface {
ActualTag() string
// returns the namespace for the field error, with the tag
// name taking precedence over the fields actual name.
// name taking precedence over the field's actual name.
//
// eg. JSON name "User.fname"
//
@@ -109,29 +109,29 @@ type FieldError interface {
// using validate.Field(...) as there is no way to extract its name
Namespace() string
// returns the namespace for the field error, with the fields
// returns the namespace for the field error, with the field's
// actual name.
//
// eq. "User.FirstName" see Namespace for comparison
//
// NOTE: this field can be blank when validating a single primitive field
// using validate.Field(...) as there is no way to extract it's name
// using validate.Field(...) as there is no way to extract its name
StructNamespace() string
// returns the fields name with the tag name taking precedence over the
// fields actual name.
// field's actual name.
//
// eg. JSON name "fname"
// see StructField for comparison
Field() string
// returns the fields actual name from the struct, when able to determine.
// returns the field's actual name from the struct, when able to determine.
//
// eq. "FirstName"
// see Field for comparison
StructField() string
// returns the actual fields value in case needed for creating the error
// returns the actual field's value in case needed for creating the error
// message
Value() interface{}
@@ -155,6 +155,9 @@ type FieldError interface {
// NOTE: if no registered translator can be found it returns the same as
// calling fe.Error()
Translate(ut ut.Translator) string
// Error returns the FieldError's message
Error() string
}
// compile time interface checks
@@ -190,19 +193,19 @@ func (fe *fieldError) ActualTag() string {
}
// Namespace returns the namespace for the field error, with the tag
// name taking precedence over the fields actual name.
// name taking precedence over the field's actual name.
func (fe *fieldError) Namespace() string {
return fe.ns
}
// StructNamespace returns the namespace for the field error, with the fields
// StructNamespace returns the namespace for the field error, with the field's
// actual name.
func (fe *fieldError) StructNamespace() string {
return fe.structNs
}
// Field returns the fields name with the tag name taking precedence over the
// fields actual name.
// Field returns the field's name with the tag name taking precedence over the
// field's actual name.
func (fe *fieldError) Field() string {
return fe.ns[len(fe.ns)-int(fe.fieldLen):]
@@ -218,13 +221,13 @@ func (fe *fieldError) Field() string {
// return fld
}
// returns the fields actual name from the struct, when able to determine.
// returns the field's actual name from the struct, when able to determine.
func (fe *fieldError) StructField() string {
// return fe.structField
return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
}
// Value returns the actual fields value in case needed for creating the error
// Value returns the actual field's value in case needed for creating the error
// message
func (fe *fieldError) Value() interface{} {
return fe.value
@@ -254,8 +257,8 @@ func (fe *fieldError) Error() string {
// Translate returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
// NOTE: is not registered translation can be found it returns the same
// as calling fe.Error()
// NOTE: if no registered translation can be found, it returns the original
// untranslated error message.
func (fe *fieldError) Translate(ut ut.Translator) string {
m, ok := fe.v.transTagFunc[ut]


@@ -7,4 +7,5 @@ require (
github.com/go-playground/locales v0.13.0
github.com/go-playground/universal-translator v0.17.0
github.com/leodido/go-urn v1.2.0
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
)


@@ -13,6 +13,13 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=


@@ -9,7 +9,7 @@ const (
alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
numberRegexString = "^[0-9]+$"
hexadecimalRegexString = "^[0-9a-fA-F]+$"
hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$"
hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
@@ -36,11 +36,12 @@ const (
latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
hostnameRegexStringRFC952 = `^[a-zA-Z][a-zA-Z0-9\-\.]+[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
@@ -86,6 +87,7 @@ var (
sSNRegex = regexp.MustCompile(sSNRegexString)
hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123)
btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
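The regex changes above relax `hexadecimalRegexString` to accept an optional `0x`/`0X` prefix and add `fqdnRegexStringRFC1123`, which requires a dot and a non-numeric TLD. A minimal sketch of how this surfaces through the baked-in `hexadecimal` and `fqdn` tags, assuming `fqdn` is backed by the new `fqdnRegexRFC1123` (as in validator v10.4); the `Target` struct and values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Target is a hypothetical struct exercising the regexes changed above.
type Target struct {
	Host string `validate:"fqdn"`        // must contain a dot and a non-numeric TLD
	Hash string `validate:"hexadecimal"` // an optional "0x"/"0X" prefix is now accepted
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Target{Host: "db.example.com", Hash: "0xDEADBEEF"})) // <nil>
	fmt.Println(v.Struct(Target{Host: "not-a-fqdn", Hash: "zz"}) != nil)      // true
}
```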


@@ -4,6 +4,7 @@ import (
"reflect"
"strconv"
"strings"
"time"
)
// extractTypeInternal gets the actual underlying type of field value.
@@ -229,6 +230,26 @@ func asInt(param string) int64 {
return i
}
// asIntFromTimeDuration parses param as time.Duration and returns it as int64
// or panics on error.
func asIntFromTimeDuration(param string) int64 {
d, err := time.ParseDuration(param)
panicIf(err)
return int64(d)
}
// asIntFromType calls the proper function to parse param as int64,
// given a field's Type t.
func asIntFromType(t reflect.Type, param string) int64 {
switch t {
case timeDurationType:
return asIntFromTimeDuration(param)
default:
return asInt(param)
}
}
// asUint returns the parameter as a uint64
// or panics if it can't convert
func asUint(param string) uint64 {
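The new `asIntFromTimeDuration`/`asIntFromType` path lets numeric comparison parameters be written as `time.ParseDuration` literals when the field's type is `time.Duration`. A hedged sketch of what this enables; the `Config` type and the `1s`/`1m` limits are made up for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-playground/validator/v10"
)

// Config is a hypothetical type; the "1s"/"1m" parameters below are parsed
// through time.ParseDuration by the asIntFromType path added above.
type Config struct {
	Timeout time.Duration `validate:"gte=1s,lte=1m"`
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Config{Timeout: 30 * time.Second}))     // <nil>
	fmt.Println(v.Struct(Config{Timeout: 2 * time.Hour}) != nil) // true: above lte=1m
}
```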


@@ -249,7 +249,7 @@ OUTER:
v.cf = cf
v.ct = ct
if !v.fldIsPointer && !hasValue(v) {
if !hasValue(v) {
return
}


@@ -27,6 +27,8 @@ const (
requiredWithoutTag = "required_without"
requiredWithTag = "required_with"
requiredWithAllTag = "required_with_all"
requiredIfTag = "required_if"
requiredUnlessTag = "required_unless"
skipValidationTag = "-"
diveTag = "dive"
keysTag = "keys"
@@ -41,7 +43,9 @@ const (
)
var (
timeType = reflect.TypeOf(time.Time{})
timeDurationType = reflect.TypeOf(time.Duration(0))
timeType = reflect.TypeOf(time.Time{})
defaultCField = &cField{namesEqual: true}
)
@@ -107,7 +111,7 @@ func New() *Validate {
switch k {
// these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
case requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag:
case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag:
_ = v.registerValidation(k, wrapFunc(val), true, true)
default:
// no need to error check here, baked in will always be valid
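`required_if` and `required_unless` are registered above with `wrapFunc(val)` and the nil-allowing flags, the same way the existing `required_with*` tags are. A small sketch of the tag semantics as documented for validator v10.4; the `Order` struct is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Order is a hypothetical struct using the two newly wired tags.
type Order struct {
	Status       string `validate:"required,oneof=pending shipped"`
	TrackingCode string `validate:"required_if=Status shipped"`     // required only when Status == "shipped"
	Coupon       string `validate:"required_unless=Status pending"` // required unless Status == "pending"
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Order{Status: "pending"}))        // <nil>: neither conditional field is required
	fmt.Println(v.Struct(Order{Status: "shipped"}) != nil) // true: TrackingCode and Coupon are missing
}
```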


@@ -44,6 +44,8 @@ func Wrap(outer, inner error) error {
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
//
// Deprecated: Use fmt.Errorf()
func Wrapf(format string, err error) error {
outerMsg := "<nil>"
if err != nil {
@@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) {
for _, err := range e.WrappedErrors() {
Walk(err, cb)
}
case interface{ Unwrap() error }:
cb(err)
Walk(e.Unwrap(), cb)
default:
cb(err)
}
@@ -167,3 +172,7 @@ func (w *wrappedError) Error() string {
func (w *wrappedError) WrappedErrors() []error {
return []error{w.Outer, w.Inner}
}
func (w *wrappedError) Unwrap() error {
return w.Inner
}
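With `Unwrap` on `wrappedError` and the `interface{ Unwrap() error }` case in `Walk`, errors produced by errwrap now participate in the standard Go 1.13 error chain. A minimal sketch, assuming a missing file so that `os.ErrNotExist` sits at the bottom of the chain:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

func main() {
	_, err := os.Open("definitely-missing.txt") // *fs.PathError wrapping os.ErrNotExist
	wrapped := errwrap.Wrapf("loading config: {{err}}", err)

	// The new Unwrap method lets errors.Is walk through errwrap's wrapper type.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```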


@@ -1,5 +1,5 @@
[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go)
[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
@@ -18,16 +18,16 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu
Raw Result (easyjson requires static code generation)
| | ns/op | allocation bytes | allocation times |
| --- | --- | --- | --- |
| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
| | ns/op | allocation bytes | allocation times |
| --------------- | ----------- | ---------------- | ---------------- |
| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
Always benchmark with your own workload.
Always benchmark with your own workload.
The result depends heavily on the data input.
# Usage
@@ -41,10 +41,10 @@ import "encoding/json"
json.Marshal(&data)
```
with
with
```go
import "github.com/json-iterator/go"
import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Marshal(&data)
@@ -60,7 +60,7 @@ json.Unmarshal(input, &data)
with
```go
import "github.com/json-iterator/go"
import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Unmarshal(input, &data)
@@ -78,10 +78,10 @@ go get github.com/json-iterator/go
Contributors
* [thockin](https://github.com/thockin)
* [mattn](https://github.com/mattn)
* [cch123](https://github.com/cch123)
* [Oleg Shaldybin](https://github.com/olegshaldybin)
* [Jason Toffaletti](https://github.com/toffaletti)
- [thockin](https://github.com/thockin)
- [mattn](https://github.com/mattn)
- [cch123](https://github.com/cch123)
- [Oleg Shaldybin](https://github.com/olegshaldybin)
- [Jason Toffaletti](https://github.com/toffaletti)
Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)


@@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 {
flag := 1
startPos := 0
endPos := 0
if any.val[0] == '+' || any.val[0] == '-' {
startPos = 1
}
@@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 {
flag = -1
}
endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
@@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 {
}
startPos := 0
endPos := 0
if any.val[0] == '-' {
return 0
@@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 {
startPos = 1
}
endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1


@@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
rawMessage := *(*json.RawMessage)(ptr)
iter := cfg.BorrowIterator([]byte(rawMessage))
defer cfg.ReturnIterator(iter)
iter.Read()
if iter.Error != nil {
if iter.Error != nil && iter.Error != io.EOF {
stream.WriteRaw("null")
} else {
cfg.ReturnIterator(iter)
stream.WriteRaw(string(rawMessage))
}
}, func(ptr unsafe.Pointer) bool {


@@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return iter.decrementDepth()
}
iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
@@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return iter.decrementDepth()
}
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}


@@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole
fieldNames = []string{tagProvidedFieldName}
}
// private?
isNotExported := unicode.IsLower(rune(originalFieldName[0]))
isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
if isNotExported {
fieldNames = []string{}
}


@@ -49,6 +49,33 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
return decoder
}
}
ptrType := reflect2.PtrTo(typ)
if ptrType.Implements(unmarshalerType) {
return &referenceDecoder{
&unmarshalerDecoder{
valType: ptrType,
},
}
}
if typ.Implements(unmarshalerType) {
return &unmarshalerDecoder{
valType: typ,
}
}
if ptrType.Implements(textUnmarshalerType) {
return &referenceDecoder{
&textUnmarshalerDecoder{
valType: ptrType,
},
}
}
if typ.Implements(textUnmarshalerType) {
return &textUnmarshalerDecoder{
valType: typ,
}
}
switch typ.Kind() {
case reflect.String:
return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -63,31 +90,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
default:
ptrType := reflect2.PtrTo(typ)
if ptrType.Implements(unmarshalerType) {
return &referenceDecoder{
&unmarshalerDecoder{
valType: ptrType,
},
}
}
if typ.Implements(unmarshalerType) {
return &unmarshalerDecoder{
valType: typ,
}
}
if ptrType.Implements(textUnmarshalerType) {
return &referenceDecoder{
&textUnmarshalerDecoder{
valType: ptrType,
},
}
}
if typ.Implements(textUnmarshalerType) {
return &textUnmarshalerDecoder{
valType: typ,
}
}
return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
}
}
@@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
return encoder
}
}
if typ == textMarshalerType {
return &directTextMarshalerEncoder{
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
}
if typ.Implements(textMarshalerType) {
return &textMarshalerEncoder{
valType: typ,
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
}
switch typ.Kind() {
case reflect.String:
return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
default:
if typ == textMarshalerType {
return &directTextMarshalerEncoder{
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
}
if typ.Implements(textMarshalerType) {
return &textMarshalerEncoder{
valType: typ,
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
}
if typ.Kind() == reflect.Interface {
return &dynamicMapKeyEncoder{ctx, typ}
}
@@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if c == '}' {
return
}
if c != '"' {
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
return
}
iter.unreadByte()
key := decoder.keyType.UnsafeNew()
decoder.keyDecoder.Decode(key, iter)
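Moving the `Marshaler`/`TextMarshaler` checks ahead of the kind switch means a string-kind map key type that also implements `encoding.TextMarshaler` now encodes through `MarshalText`, which should match `encoding/json`. A rough sketch of the resulting behavior; the `Key` type is invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// Key has string kind but also a TextMarshaler; with the reordering above the
// marshaler wins for map keys, as it does in encoding/json.
type Key string

func (k Key) MarshalText() ([]byte, error) { return []byte(strings.ToUpper(string(k))), nil }

func main() {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	out, err := json.Marshal(map[Key]int{"a": 1})
	fmt.Println(string(out), err) // expected: {"A":1} <nil>
}
```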


@@ -2,7 +2,6 @@ package jsoniter
import (
"github.com/modern-go/reflect2"
"reflect"
"unsafe"
)
@@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
ptrType := typ.(*reflect2.UnsafePtrType)
elemType := ptrType.Elem()
decoder := decoderOfType(ctx, elemType)
if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
return &dereferenceDecoder{elemType, decoder}
}
return &OptionalDecoder{elemType, decoder}
}


@@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
if c != '}' {
@@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()


@@ -103,14 +103,14 @@ func (stream *Stream) Flush() error {
if stream.Error != nil {
return stream.Error
}
n, err := stream.out.Write(stream.buf)
_, err := stream.out.Write(stream.buf)
if err != nil {
if stream.Error == nil {
stream.Error = err
}
return err
}
stream.buf = stream.buf[n:]
stream.buf = stream.buf[:0]
return nil
}
@@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() {
func (stream *Stream) WriteMore() {
stream.writeByte(',')
stream.writeIndention(0)
stream.Flush()
}
// WriteArrayStart write [ with possible indention


@@ -1,8 +1,8 @@
language: go
go:
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
- master
sudo: true
@@ -27,7 +27,7 @@ before_install:
- ./.travis.sh client_configure
- go get golang.org/x/tools/cmd/goimports
- go get golang.org/x/lint/golint
- GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.1
- GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3
before_script:
- createdb pqgotest
@@ -38,7 +38,7 @@ script:
- >
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- go vet ./...
- staticcheck -go 1.11 ./...
- staticcheck -go 1.13 ./...
- golint ./...
- PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
- PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...


@@ -1,29 +0,0 @@
## Contributing to pq
`pq` has a backlog of pull requests, but contributions are still very
much welcome. You can help with patch review, submitting bug reports,
or adding new functionality. There is no formal style guide, but
please conform to the style of existing code and general Go formatting
conventions when submitting patches.
### Patch review
Help review existing open pull requests by commenting on the code or
proposed functionality.
### Bug reports
We appreciate any bug reports, but especially ones with self-contained
(doesn't depend on code outside of pq), minimal (can't be simplified
further) test cases. It's especially helpful if you can submit a pull
request with just the failing test case (you'll probably want to
pattern it after the tests in
[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go).
### New functionality
There are a number of pending patches for new functionality, so
additional feature patches will take a while to merge. Still, patches
are generally reviewed based on usefulness and complexity in addition
to time-in-queue, so if you have a knockout idea, take a shot. Feel
free to open an issue discussion your proposed patch beforehand.

vendor/github.com/lib/pq/README.md generated vendored

@@ -1,21 +1,11 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc)
## Install
go get github.com/lib/pq
## Docs
For detailed documentation and basic usage examples, please see the package
documentation at <https://godoc.org/github.com/lib/pq>.
## Tests
`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
## Features
* SSL
@@ -29,67 +19,12 @@ documentation at <https://godoc.org/github.com/lib/pq>.
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
* GSS (Kerberos) auth
## Future / Things you can help with
## Tests
* Better COPY FROM / COPY TO (see discussion in #181)
`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
## Thank you (alphabetical)
## Status
Some of these contributors are from the original library `bmizerany/pq.go` whose
code still exists in here.
* Andy Balholm (andybalholm)
* Ben Berkert (benburkert)
* Benjamin Heatwole (bheatwole)
* Bill Mill (llimllib)
* Bjørn Madsen (aeons)
* Blake Gentry (bgentry)
* Brad Fitzpatrick (bradfitz)
* Charlie Melbye (cmelbye)
* Chris Bandy (cbandy)
* Chris Gilling (cgilling)
* Chris Walsh (cwds)
* Dan Sosedoff (sosedoff)
* Daniel Farina (fdr)
* Eric Chlebek (echlebek)
* Eric Garrido (minusnine)
* Eric Urban (hydrogen18)
* Everyone at The Go Team
* Evan Shaw (edsrzf)
* Ewan Chou (coocood)
* Fazal Majid (fazalmajid)
* Federico Romero (federomero)
* Fumin (fumin)
* Gary Burd (garyburd)
* Heroku (heroku)
* James Pozdena (jpoz)
* Jason McVetta (jmcvetta)
* Jeremy Jay (pbnjay)
* Joakim Sernbrant (serbaut)
* John Gallagher (jgallagher)
* Jonathan Rudenberg (titanous)
* Joël Stemmer (jstemmer)
* Kamil Kisiel (kisielk)
* Kelly Dunn (kellydunn)
* Keith Rarick (kr)
* Kir Shatrov (kirs)
* Lann Martin (lann)
* Maciek Sakrejda (uhoh-itsmaciek)
* Marc Brinkmann (mbr)
* Marko Tiikkaja (johto)
* Matt Newberry (MattNewberry)
* Matt Robenolt (mattrobenolt)
* Martin Olsen (martinolsen)
* Mike Lewis (mikelikespie)
* Nicolas Patry (Narsil)
* Oliver Tonnhofer (olt)
* Patrick Hayes (phayes)
* Paul Hammond (paulhammond)
* Ryan Smith (ryandotsmith)
* Samuel Stauffer (samuel)
* Timothée Peignier (cyberdelia)
* Travis Cline (tmc)
* TruongSinh Tran-Nguyen (truongsinh)
* Yaismel Miranda (ympons)
* notedit (notedit)
This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained.

vendor/github.com/lib/pq/conn.go generated vendored

@@ -149,6 +149,15 @@ type conn struct {
// If true this connection is in the middle of a COPY
inCopy bool
// If not nil, notices will be synchronously sent here
noticeHandler func(*Error)
// If not nil, notifications will be synchronously sent here
notificationHandler func(*Notification)
// GSSAPI context
gss GSS
}
// Handle driver-side settings in parsed connection string.
@@ -329,10 +338,6 @@ func (c *Connector) open(ctx context.Context) (cn *conn, err error) {
func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) {
network, address := network(o)
// SSL is not necessary or supported over UNIX domain sockets
if network == "unix" {
o["sslmode"] = "disable"
}
// Zero or not specified means wait indefinitely.
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
@@ -971,7 +976,13 @@ func (cn *conn) recv() (t byte, r *readBuf) {
case 'E':
panic(parseError(r))
case 'N':
// ignore
if n := cn.noticeHandler; n != nil {
n(parseError(r))
}
case 'A':
if n := cn.notificationHandler; n != nil {
n(recvNotification(r))
}
default:
return
}
@@ -988,8 +999,14 @@ func (cn *conn) recv1Buf(r *readBuf) byte {
}
switch t {
case 'A', 'N':
// ignore
case 'A':
if n := cn.notificationHandler; n != nil {
n(recvNotification(r))
}
case 'N':
if n := cn.noticeHandler; n != nil {
n(parseError(r))
}
case 'S':
cn.processParameterStatus(r)
default:
@@ -1057,7 +1074,10 @@ func isDriverSetting(key string) bool {
return true
case "binary_parameters":
return true
case "krbsrvname":
return true
case "krbspn":
return true
default:
return false
}
@@ -1137,6 +1157,59 @@ func (cn *conn) auth(r *readBuf, o values) {
if r.int32() != 0 {
errorf("unexpected authentication response: %q", t)
}
case 7: // GSSAPI, startup
if newGss == nil {
errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)")
}
cli, err := newGss()
if err != nil {
errorf("kerberos error: %s", err.Error())
}
var token []byte
if spn, ok := o["krbspn"]; ok {
// Use the supplied SPN if provided..
token, err = cli.GetInitTokenFromSpn(spn)
} else {
// Allow the kerberos service name to be overridden
service := "postgres"
if val, ok := o["krbsrvname"]; ok {
service = val
}
token, err = cli.GetInitToken(o["host"], service)
}
if err != nil {
errorf("failed to get Kerberos ticket: %q", err)
}
w := cn.writeBuf('p')
w.bytes(token)
cn.send(w)
// Store for GSSAPI continue message
cn.gss = cli
case 8: // GSSAPI continue
if cn.gss == nil {
errorf("GSSAPI protocol error")
}
b := []byte(*r)
done, tokOut, err := cn.gss.Continue(b)
if err == nil && !done {
w := cn.writeBuf('p')
w.bytes(tokOut)
cn.send(w)
}
// Errors fall through and read the more detailed message
// from the server..
case 10:
sc := scram.NewClient(sha256.New, o["user"], o["password"])
sc.Step(nil)


@@ -27,7 +27,7 @@ func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
return c.open(ctx)
}
// Driver returnst the underlying driver of this Connector.
// Driver returns the underlying driver of this Connector.
func (c *Connector) Driver() driver.Driver {
return &Driver{}
}
@@ -106,5 +106,10 @@ func NewConnector(dsn string) (*Connector, error) {
o["user"] = u
}
// SSL is not necessary or supported over UNIX domain sockets
if network, _ := network(o); network == "unix" {
o["sslmode"] = "disable"
}
return &Connector{opts: o, dialer: defaultDialer{}}, nil
}

vendor/github.com/lib/pq/copy.go generated vendored

@@ -49,6 +49,7 @@ type copyin struct {
buffer []byte
rowData chan []byte
done chan bool
driver.Result
closed bool
@@ -151,8 +152,12 @@ func (ci *copyin) resploop() {
switch t {
case 'C':
// complete
res, _ := ci.cn.parseComplete(r.string())
ci.setResult(res)
case 'N':
// NoticeResponse
if n := ci.cn.noticeHandler; n != nil {
n(parseError(&r))
}
case 'Z':
ci.cn.processReadyForQuery(&r)
ci.done <- true
@@ -199,6 +204,22 @@ func (ci *copyin) setError(err error) {
ci.Unlock()
}
func (ci *copyin) setResult(result driver.Result) {
ci.Lock()
ci.Result = result
ci.Unlock()
}
func (ci *copyin) getResult() driver.Result {
ci.Lock()
result := ci.Result
if result == nil {
return driver.RowsAffected(0)
}
ci.Unlock()
return result
}
func (ci *copyin) NumInput() int {
return -1
}
@@ -229,7 +250,11 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
}
if len(v) == 0 {
return driver.RowsAffected(0), ci.Close()
if err := ci.Close(); err != nil {
return driver.RowsAffected(0), err
}
return ci.getResult(), nil
}
numValues := len(v)
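With `setResult`/`getResult`, the terminating empty `Exec` of a COPY now surfaces the server's `COPY n` command tag instead of a hard-coded `RowsAffected(0)`. A hedged usage sketch; the table and column names are placeholders:

```go
package copyexample

import (
	"database/sql"

	"github.com/lib/pq"
)

// bulkInsert copies values into a placeholder measurements(value) table and
// returns the row count reported by the server's "COPY n" command tag.
func bulkInsert(txn *sql.Tx, values []float64) (int64, error) {
	stmt, err := txn.Prepare(pq.CopyIn("measurements", "value")) // table/column are placeholders
	if err != nil {
		return 0, err
	}
	for _, v := range values {
		if _, err := stmt.Exec(v); err != nil {
			return 0, err
		}
	}
	res, err := stmt.Exec() // the empty Exec terminates the COPY and now carries the result
	if err != nil {
		return 0, err
	}
	if err := stmt.Close(); err != nil {
		return 0, err
	}
	return res.RowsAffected() // reflects the server-reported copy count after the change above
}
```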

vendor/github.com/lib/pq/doc.go generated vendored

@@ -241,5 +241,28 @@ bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at
https://godoc.org/github.com/lib/pq/example/listen.
Kerberos Support
If you need support for Kerberos authentication, add the following to your main
package:
import "github.com/lib/pq/auth/kerberos"
func init() {
pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() })
}
This package is in a separate module so that users who don't need Kerberos
don't have to download unnecessary dependencies.
When imported, additional connection string parameters are supported:
* krbsrvname - GSS (Kerberos) service name when constructing the
SPN (default is `postgres`). This will be combined with the host
to form the full SPN: `krbsrvname/host`.
* krbspn - GSS (Kerberos) SPN. This takes priority over
`krbsrvname` if present.
*/
package pq
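A hedged end-to-end sketch of the Kerberos wiring documented above, using the `krbsrvname` parameter; the host, database, and user names are placeholders, and `kerberos.NewGSS` is the constructor referenced by the `RegisterGSSProvider` documentation:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
	"github.com/lib/pq/auth/kerberos"
)

func init() {
	// Register the GSSAPI provider once, as described in the package docs above.
	pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() })
}

func main() {
	// db.example.com / app / alice are placeholders; krbsrvname sets the SPN service name.
	db, err := sql.Open("postgres", "host=db.example.com dbname=app user=alice krbsrvname=postgres")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err) // fails unless a Kerberos ticket for the user is available
	}
}
```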

vendor/github.com/lib/pq/encode.go generated vendored

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"math"
"regexp"
"strconv"
"strings"
"sync"
@@ -16,6 +17,8 @@ import (
"github.com/lib/pq/oid"
)
var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`)
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
@@ -202,10 +205,27 @@ func mustParse(f string, typ oid.Oid, s []byte) time.Time {
str[len(str)-3] == ':' {
f += ":00"
}
// Special case for 24:00 time.
// Unfortunately, golang does not parse 24:00 as a proper time.
// In this case, we want to try "round to the next day", to differentiate.
// As such, we find if the 24:00 time matches at the beginning; if so,
// we default it back to 00:00 but add a day later.
var is2400Time bool
switch typ {
case oid.T_timetz, oid.T_time:
if matches := time2400Regex.FindStringSubmatch(str); matches != nil {
// Concatenate timezone information at the back.
str = "00:00:00" + str[len(matches[1]):]
is2400Time = true
}
}
t, err := time.Parse(f, str)
if err != nil {
errorf("decode: %s", err)
}
if is2400Time {
t = t.Add(24 * time.Hour)
}
return t
}

vendor/github.com/lib/pq/go.mod generated vendored

@@ -1 +1,3 @@
module github.com/lib/pq
go 1.13

vendor/github.com/lib/pq/krb.go generated vendored Normal file

@@ -0,0 +1,27 @@
package pq
// NewGSSFunc creates a GSS authentication provider, for use with
// RegisterGSSProvider.
type NewGSSFunc func() (GSS, error)
var newGss NewGSSFunc
// RegisterGSSProvider registers a GSS authentication provider. For example, if
// you need to use Kerberos to authenticate with your server, add this to your
// main package:
//
// import "github.com/lib/pq/auth/kerberos"
//
// func init() {
// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() })
// }
func RegisterGSSProvider(newGssArg NewGSSFunc) {
newGss = newGssArg
}
// GSS provides GSSAPI authentication (e.g., Kerberos).
type GSS interface {
GetInitToken(host string, service string) ([]byte, error)
GetInitTokenFromSpn(spn string) ([]byte, error)
Continue(inToken []byte) (done bool, outToken []byte, err error)
}

vendor/github.com/lib/pq/notice.go generated vendored Normal file

@@ -0,0 +1,71 @@
// +build go1.10
package pq
import (
"context"
"database/sql/driver"
)
// NoticeHandler returns the notice handler on the given connection, if any. A
// runtime panic occurs if c is not a pq connection. This is rarely used
// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead.
func NoticeHandler(c driver.Conn) func(*Error) {
return c.(*conn).noticeHandler
}
// SetNoticeHandler sets the given notice handler on the given connection. A
// runtime panic occurs if c is not a pq connection. A nil handler may be used
// to unset it. This is rarely used directly, use ConnectorNoticeHandler and
// ConnectorWithNoticeHandler instead.
//
// Note: Notice handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func SetNoticeHandler(c driver.Conn, handler func(*Error)) {
c.(*conn).noticeHandler = handler
}
// NoticeHandlerConnector wraps a regular connector and sets a notice handler
// on it.
type NoticeHandlerConnector struct {
driver.Connector
noticeHandler func(*Error)
}
// Connect calls the underlying connector's connect method and then sets the
// notice handler.
func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
c, err := n.Connector.Connect(ctx)
if err == nil {
SetNoticeHandler(c, n.noticeHandler)
}
return c, err
}
// ConnectorNoticeHandler returns the currently set notice handler, if any. If
// the given connector is not a result of ConnectorWithNoticeHandler, nil is
// returned.
func ConnectorNoticeHandler(c driver.Connector) func(*Error) {
if c, ok := c.(*NoticeHandlerConnector); ok {
return c.noticeHandler
}
return nil
}
// ConnectorWithNoticeHandler creates or sets the given handler for the given
// connector. If the given connector is a result of calling this function
// previously, it is simply set on the given connector and returned. Otherwise,
// this returns a new connector wrapping the given one and setting the notice
// handler. A nil notice handler may be used to unset it.
//
// The returned connector is intended to be used with database/sql.OpenDB.
//
// Note: Notice handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector {
if c, ok := c.(*NoticeHandlerConnector); ok {
c.noticeHandler = handler
return c
}
return &NoticeHandlerConnector{Connector: c, noticeHandler: handler}
}
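The connector-based API above composes with `database/sql`. A minimal sketch that logs server notices, assuming a reachable database; the DSN and the `RAISE NOTICE` statement are illustrative:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	base, err := pq.NewConnector("dbname=app sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	// Every connection opened through this connector gets the notice handler.
	connector := pq.ConnectorWithNoticeHandler(base, func(e *pq.Error) {
		log.Printf("server notice: %s", e.Message)
	})

	db := sql.OpenDB(connector)
	defer db.Close()

	// The handler fires synchronously while this statement is processed.
	if _, err := db.Exec(`DO $$ BEGIN RAISE NOTICE 'hello from the server'; END $$`); err != nil {
		log.Fatal(err)
	}
}
```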

vendor/github.com/lib/pq/notify.go generated vendored

@@ -4,6 +4,8 @@ package pq
// This module contains support for Postgres LISTEN/NOTIFY.
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"sync"
@@ -29,6 +31,61 @@ func recvNotification(r *readBuf) *Notification {
return &Notification{bePid, channel, extra}
}
// SetNotificationHandler sets the given notification handler on the given
// connection. A runtime panic occurs if c is not a pq connection. A nil handler
// may be used to unset it.
//
// Note: Notification handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func SetNotificationHandler(c driver.Conn, handler func(*Notification)) {
c.(*conn).notificationHandler = handler
}
// NotificationHandlerConnector wraps a regular connector and sets a notification handler
// on it.
type NotificationHandlerConnector struct {
driver.Connector
notificationHandler func(*Notification)
}
// Connect calls the underlying connector's connect method and then sets the
// notification handler.
func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
c, err := n.Connector.Connect(ctx)
if err == nil {
SetNotificationHandler(c, n.notificationHandler)
}
return c, err
}
// ConnectorNotificationHandler returns the currently set notification handler, if any. If
// the given connector is not a result of ConnectorWithNotificationHandler, nil is
// returned.
func ConnectorNotificationHandler(c driver.Connector) func(*Notification) {
if c, ok := c.(*NotificationHandlerConnector); ok {
return c.notificationHandler
}
return nil
}
// ConnectorWithNotificationHandler creates or sets the given handler for the given
// connector. If the given connector is a result of calling this function
// previously, it is simply set on the given connector and returned. Otherwise,
// this returns a new connector wrapping the given one and setting the notification
// handler. A nil notification handler may be used to unset it.
//
// The returned connector is intended to be used with database/sql.OpenDB.
//
// Note: Notification handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector {
if c, ok := c.(*NotificationHandlerConnector); ok {
c.notificationHandler = handler
return c
}
return &NotificationHandlerConnector{Connector: c, notificationHandler: handler}
}
const (
connStateIdle int32 = iota
connStateExpectResponse
@@ -174,8 +231,12 @@ func (l *ListenerConn) listenerConnLoop() (err error) {
}
l.replyChan <- message{t, nil}
case 'N', 'S':
case 'S':
// ignore
case 'N':
if n := l.cn.noticeHandler; n != nil {
n(parseError(r))
}
default:
return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
}


@@ -1,6 +1,6 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun
package pq

vendor/github.com/modern-go/concurrent/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
/coverage.txt

vendor/github.com/modern-go/concurrent/.travis.yml generated vendored Normal file

@@ -0,0 +1,14 @@
language: go
go:
- 1.8.x
- 1.x
before_install:
- go get -t -v ./...
script:
- ./test.sh
after_success:
- bash <(curl -s https://codecov.io/bash)


@@ -1,2 +1,49 @@
# concurrent
concurrency utilities
[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent)
[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent)
[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent)
[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
* concurrent.Map: backport sync.Map for go below 1.9
* concurrent.Executor: goroutine with explicit ownership and cancellable
# concurrent.Map
because sync.Map is only available in go 1.9, we can use concurrent.Map to make code portable
```go
m := concurrent.NewMap()
m.Store("hello", "world")
elem, found := m.Load("hello")
// elem will be "world"
// found will be true
```
# concurrent.Executor
```go
executor := concurrent.NewUnboundedExecutor()
executor.Go(func(ctx context.Context) {
everyMillisecond := time.NewTicker(time.Millisecond)
for {
select {
case <-ctx.Done():
fmt.Println("goroutine exited")
return
case <-everyMillisecond.C:
// do something
}
}
})
time.Sleep(time.Second)
executor.StopAndWaitForever()
fmt.Println("executor stopped")
```
attach a goroutine to an executor instance, so that we can
* cancel it by stopping the executor with Stop/StopAndWait/StopAndWaitForever
* handle panics via a callback: the default behavior will no longer crash your application


@@ -2,6 +2,13 @@ package concurrent
import "context"
// Executor replaces the go keyword to start a new goroutine.
// The goroutine should cancel itself if the context passed in has been cancelled.
// A goroutine started by the executor is owned by the executor;
// we can cancel all goroutines owned by the executor just by stopping the executor itself.
// However, the Executor interface has no Stop method: whoever starts and owns the executor
// should use the concrete executor type, instead of this interface.
type Executor interface {
// Go starts a new goroutine controlled by the context
Go(handler func(ctx context.Context))
}
}


@@ -4,10 +4,12 @@ package concurrent
import "sync"
// Map is a wrapper for sync.Map introduced in go1.9
type Map struct {
sync.Map
}
// NewMap creates a thread safe Map
func NewMap() *Map {
return &Map{}
}
}


@@ -4,17 +4,20 @@ package concurrent
import "sync"
// Map implements a thread safe map for go version below 1.9 using mutex
type Map struct {
lock sync.RWMutex
data map[interface{}]interface{}
}
// NewMap creates a thread safe map
func NewMap() *Map {
return &Map{
data: make(map[interface{}]interface{}, 32),
}
}
// Load is same as sync.Map Load
func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
m.lock.RLock()
elem, found = m.data[key]
@@ -22,9 +25,9 @@ func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
return
}
// Load is same as sync.Map Store
func (m *Map) Store(key interface{}, elem interface{}) {
m.lock.Lock()
m.data[key] = elem
m.lock.Unlock()
}

vendor/github.com/modern-go/concurrent/log.go generated vendored Normal file

@@ -0,0 +1,13 @@
package concurrent
import (
"os"
"log"
"io/ioutil"
)
// ErrorLogger is used to print out error, can be set to writer other than stderr
var ErrorLogger = log.New(os.Stderr, "", 0)
// InfoLogger is used to print informational message, default to off
var InfoLogger = log.New(ioutil.Discard, "", 0)

vendor/github.com/modern-go/concurrent/test.sh generated vendored Normal file

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
fi
done


@@ -4,33 +4,37 @@ import (
"context"
"fmt"
"runtime"
"runtime/debug"
"sync"
"time"
"runtime/debug"
"reflect"
)
var LogInfo = func(event string, properties ...interface{}) {
// HandlePanic logs goroutine panic by default
var HandlePanic = func(recovered interface{}, funcName string) {
ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
ErrorLogger.Println(string(debug.Stack()))
}
var LogPanic = func(recovered interface{}, properties ...interface{}) interface{} {
fmt.Println(fmt.Sprintf("paniced: %v", recovered))
debug.PrintStack()
return recovered
}
const StopSignal = "STOP!"
// UnboundedExecutor is an executor without a limit on the number of alive goroutines.
// It tracks the goroutines it starts, and can cancel them on shutdown.
type UnboundedExecutor struct {
ctx context.Context
cancel context.CancelFunc
activeGoroutinesMutex *sync.Mutex
activeGoroutines map[string]int
HandlePanic func(recovered interface{}, funcName string)
}
// GlobalUnboundedExecutor has the life cycle of the program itself.
// Any goroutine that should be shut down before main exits can be started from this executor.
// GlobalUnboundedExecutor expects the main function to call Stop;
// it does not magically know when the main function exits.
var GlobalUnboundedExecutor = NewUnboundedExecutor()
// NewUnboundedExecutor creates a new UnboundedExecutor,
// UnboundedExecutor can not be created by &UnboundedExecutor{}
// HandlePanic can be set with a callback to override global HandlePanic
func NewUnboundedExecutor() *UnboundedExecutor {
ctx, cancel := context.WithCancel(context.TODO())
return &UnboundedExecutor{
@@ -41,8 +45,13 @@ func NewUnboundedExecutor() *UnboundedExecutor {
}
}
// Go starts a new goroutine and tracks its lifecycle.
// Panic will be recovered and logged automatically, except for StopSignal
func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
_, file, line, _ := runtime.Caller(1)
pc := reflect.ValueOf(handler).Pointer()
f := runtime.FuncForPC(pc)
funcName := f.Name()
file, line := f.FileLine(pc)
executor.activeGoroutinesMutex.Lock()
defer executor.activeGoroutinesMutex.Unlock()
startFrom := fmt.Sprintf("%s:%d", file, line)
@@ -50,46 +59,57 @@ func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
go func() {
defer func() {
recovered := recover()
if recovered != nil && recovered != StopSignal {
LogPanic(recovered)
// if you want to quit a goroutine without trigger HandlePanic
// use runtime.Goexit() to quit
if recovered != nil {
if executor.HandlePanic == nil {
HandlePanic(recovered, funcName)
} else {
executor.HandlePanic(recovered, funcName)
}
}
executor.activeGoroutinesMutex.Lock()
defer executor.activeGoroutinesMutex.Unlock()
executor.activeGoroutines[startFrom] -= 1
executor.activeGoroutinesMutex.Unlock()
}()
handler(executor.ctx)
}()
}
// Stop cancel all goroutines started by this executor without wait
func (executor *UnboundedExecutor) Stop() {
executor.cancel()
}
// StopAndWaitForever cancel all goroutines started by this executor and
// wait until all goroutines exited
func (executor *UnboundedExecutor) StopAndWaitForever() {
executor.StopAndWait(context.Background())
}
// StopAndWait cancel all goroutines started by this executor and wait.
// Wait can be cancelled by the context passed in.
func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
executor.cancel()
for {
fiveSeconds := time.NewTimer(time.Millisecond * 100)
oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
select {
case <-fiveSeconds.C:
case <-oneHundredMilliseconds.C:
if executor.checkNoActiveGoroutines() {
return
}
case <-ctx.Done():
return
}
if executor.checkGoroutines() {
return
}
}
}
func (executor *UnboundedExecutor) checkGoroutines() bool {
func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
executor.activeGoroutinesMutex.Lock()
defer executor.activeGoroutinesMutex.Unlock()
for startFrom, count := range executor.activeGoroutines {
if count > 0 {
LogInfo("event!unbounded_executor.still waiting goroutines to quit",
InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit",
"startFrom", startFrom,
"count", count)
return false


@@ -4,6 +4,7 @@ import (
"reflect"
"runtime"
"strings"
"sync"
"unsafe"
)
@@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
var types = map[string]reflect.Type{}
var packages = map[string]map[string]reflect.Type{}
// initOnce guards initialization of types and packages
var initOnce sync.Once
var types map[string]reflect.Type
var packages map[string]map[string]reflect.Type
// discoverTypes initializes types and packages
func discoverTypes() {
types = make(map[string]reflect.Type)
packages = make(map[string]map[string]reflect.Type)
func init() {
ver := runtime.Version()
if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
loadGo15Types()
@@ -90,11 +98,13 @@ type emptyInterface struct {
// TypeByName return the type by its name, just like Class.forName in java
func TypeByName(typeName string) Type {
initOnce.Do(discoverTypes)
return Type2(types[typeName])
}
// TypeByPackageName return the type by its package and name
func TypeByPackageName(pkgPath string, name string) Type {
initOnce.Do(discoverTypes)
pkgTypes := packages[pkgPath]
if pkgTypes == nil {
return nil


@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2012-2015 Ugorji Nwoke.
Copyright (c) 2012-2020 Ugorji Nwoke.
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy

vendor/github.com/ugorji/go/codec/README.md generated vendored Normal file

@@ -0,0 +1,281 @@
# Package Documentation for github.com/ugorji/go/codec
Package codec provides a High Performance, Feature-Rich Idiomatic Go 1.4+
codec/encoding library for binc, msgpack, cbor, json.
Supported Serialization formats are:
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
This package will carefully use 'package unsafe' for performance reasons in
specific places. You can build without unsafe use by passing the safe or
appengine tag i.e. 'go install -tags=safe ...'.
For detailed usage information, read the primer at
http://ugorji.net/blog/go-codec-primer .
The idiomatic Go support is as seen in other encoding packages in the
standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Support for go 1.4 and above, while selectively using newer APIs for later releases
- Excellent code coverage ( > 90% )
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Careful selected use of 'unsafe' for targeted performance gains.
- 100% safe mode supported, where 'unsafe' is not used at all.
- Lock-free (sans mutex) concurrency for scaling to 100's of cores
- In-place updates during decode, with option to zero value in maps and slices prior to decode
- Coerce types where appropriate
e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
- Encoding from any value and decoding into pointer to any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Support IsZero() bool to determine if a value is a zero value.
Analogous to time.Time.IsZero() bool.
- Decoding without a schema (into a interface{}).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Mapping a non-interface type to an interface, so we can decode appropriately
into any interface type with a correctly configured non-interface value.
- Encode a struct as an array, and decode struct from an array in the data stream
- Option to encode struct keys as numbers (instead of strings)
(to support structured streams with fields encoded as numeric codes)
- Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance, supported in go 1.6+
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
- NIL in data stream decoded as zero value
- Never silently skip data when decoding.
User decides whether to return an error or silently skip data when keys or indexes
in the data stream do not map to fields in the struct.
- Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
## Extension Support
Users can register a function to handle the encoding or decoding of their
custom types.
There are no restrictions on what the custom type can be. Some examples:
```go
type BisSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
```
As an illustration, MyStructWithUnexportedFields would normally be encoded
as an empty map because it has no exported fields, while UUID would be
encoded as a string. However, with extension support, you can encode any of
these however you like.
There is also seamless support provided for registering an extension (with a
tag) but letting the encoding mechanism default to the standard way.
## Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halves. We
determine how to encode or decode by walking this decision tree
- is there an extension registered for the type?
- is type a codec.Selfer?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce chances of issues happening because the
encoding and decoding sides are out of sync e.g. decoded via very specific
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
Consequently, if a type only defines one-half of the symmetry (e.g. it
implements UnmarshalJSON() but not MarshalJSON() ), then that type doesn't
satisfy the check and we will continue walking down the decision tree.
## RPC
RPC Client and Server Codecs are implemented, so the codecs can be used with
the standard net/rpc package.
## Usage
The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent
modification.
The Encoder and Decoder are NOT safe for concurrent use.
Consequently, the usage model is basically:
- Create and initialize the Handle before any use.
Once created, DO NOT modify it.
- Multiple Encoders or Decoders can now use the Handle concurrently.
They only read information off the Handle (never write).
- However, each Encoder or Decoder MUST not be used concurrently
- To re-use an Encoder/Decoder, call Reset(...) on it first.
This allows you use state maintained on the Encoder/Decoder.
Sample usage model:
```go
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
ch codec.CborHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
```
## Running Tests
To run tests, use the following:
```
go test
```
To run the full suite of tests, use the following:
```
go test -tags alltests -run Suite
```
You can run the tag 'safe' to run tests or build in safe mode. e.g.
```
go test -tags safe -run Json
go test -tags "alltests safe" -run Suite
```
## Running Benchmarks
```
cd bench
go test -bench . -benchmem -benchtime 1s
```
Please see http://github.com/ugorji/go-codec-bench .
## Caveats
Struct fields matching the following are ignored during encoding and
decoding
- struct tag value set to -
- func, complex numbers, unsafe pointers
- unexported and not embedded
- unexported and embedded and not struct kind
- unexported and embedded pointers (from go1.10)
Every other field in a struct will be encoded/decoded.
Embedded fields are encoded as if they exist in the top-level struct, with
some caveats. See Encode documentation.
## Exported Package API
```go
const CborStreamBytes byte = 0x5f ...
const GenVersion = 17
var SelfExt = &extFailWrapper{}
var GoRpc goRpc
var MsgpackSpecRpc msgpackSpecRpc
func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver)
func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver)
type BasicHandle struct{ ... }
type BincHandle struct{ ... }
type BytesExt interface{ ... }
type CborHandle struct{ ... }
type DecodeOptions struct{ ... }
type Decoder struct{ ... }
func NewDecoder(r io.Reader, h Handle) *Decoder
func NewDecoderBytes(in []byte, h Handle) *Decoder
type EncodeOptions struct{ ... }
type Encoder struct{ ... }
func NewEncoder(w io.Writer, h Handle) *Encoder
func NewEncoderBytes(out *[]byte, h Handle) *Encoder
type Ext interface{ ... }
type Handle interface{ ... }
type InterfaceExt interface{ ... }
type JsonHandle struct{ ... }
type MapBySlice interface{ ... }
type MissingFielder interface{ ... }
type MsgpackHandle struct{ ... }
type MsgpackSpecRpcMultiArgs []interface{}
type RPCOptions struct{ ... }
type Raw []byte
type RawExt struct{ ... }
type Rpc interface{ ... }
type Selfer interface{ ... }
type SimpleHandle struct{ ... }
type TypeInfos struct{ ... }
func NewTypeInfos(tags []string) *TypeInfos
```


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -8,7 +8,7 @@ import (
"time"
)
const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
const bincDoPrune = true
// vd as low 4 bits (there are 16 slots)
const (
@@ -24,10 +24,10 @@ const (
bincVdTimestamp
bincVdSmallInt
bincVdUnicodeOther
_ // bincVdUnicodeOther
bincVdSymbol
bincVdDecimal
_ // bincVdDecimal
_ // open slot
_ // open slot
bincVdCustomExt = 0x0f
@@ -46,7 +46,7 @@ const (
)
const (
bincFlBin16 byte = iota
_ byte = iota // bincFlBin16 byte = iota
bincFlBin32
_ // bincFlBin32e
bincFlBin64
@@ -54,48 +54,43 @@ const (
// others not currently supported
)
func bincdesc(vd, vs byte) string {
switch vd {
case bincVdSpecial:
switch vs {
case bincSpNil:
return "nil"
case bincSpFalse:
return "false"
case bincSpTrue:
return "true"
case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat:
return "float"
case bincSpZero:
return "uint"
case bincSpNegOne:
return "int"
default:
return "unknown"
}
case bincVdSmallInt, bincVdPosInt:
return "uint"
case bincVdNegInt:
return "int"
case bincVdFloat:
return "float"
case bincVdSymbol:
return "string"
case bincVdString:
return "string"
case bincVdByteArray:
return "bytes"
case bincVdTimestamp:
return "time"
case bincVdCustomExt:
return "ext"
case bincVdArray:
return "array"
case bincVdMap:
return "map"
default:
return "unknown"
var (
bincdescSpecialVsNames = map[byte]string{
bincSpNil: "nil",
bincSpFalse: "false",
bincSpTrue: "true",
bincSpNan: "float",
bincSpPosInf: "float",
bincSpNegInf: "float",
bincSpZeroFloat: "float",
bincSpZero: "uint",
bincSpNegOne: "int",
}
bincdescVdNames = map[byte]string{
bincVdSpecial: "special",
bincVdSmallInt: "uint",
bincVdPosInt: "uint",
bincVdFloat: "float",
bincVdSymbol: "string",
bincVdString: "string",
bincVdByteArray: "bytes",
bincVdTimestamp: "time",
bincVdCustomExt: "ext",
bincVdArray: "array",
bincVdMap: "map",
}
)
func bincdesc(vd, vs byte) (s string) {
if vd == bincVdSpecial {
s = bincdescSpecialVsNames[vs]
} else {
s = bincdescVdNames[vd]
}
if s == "" {
s = "unknown"
}
return
}
type bincEncDriver struct {
@@ -135,18 +130,30 @@ func (e *bincEncDriver) EncodeBool(b bool) {
}
}
func (e *bincEncDriver) EncodeFloat32(f float32) {
func (e *bincEncDriver) encSpFloat(f float64) (done bool) {
if f == 0 {
e.e.encWr.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
} else if math.IsNaN(float64(f)) {
e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNan)
} else if math.IsInf(float64(f), +1) {
e.e.encWr.writen1(bincVdSpecial<<4 | bincSpPosInf)
} else if math.IsInf(float64(f), -1) {
e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNegInf)
} else {
return
}
e.e.encWr.writen1(bincVdFloat<<4 | bincFlBin32)
bigenHelper{e.b[:4], e.e.w()}.writeUint32(math.Float32bits(f))
return true
}
func (e *bincEncDriver) EncodeFloat32(f float32) {
if !e.encSpFloat(float64(f)) {
e.e.encWr.writen1(bincVdFloat<<4 | bincFlBin32)
bigenHelper{e.b[:4], e.e.w()}.writeUint32(math.Float32bits(f))
}
}
func (e *bincEncDriver) EncodeFloat64(f float64) {
if f == 0 {
e.e.encWr.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
if e.encSpFloat(f) {
return
}
bigen.PutUint64(e.b[:8], math.Float64bits(f))
@@ -172,14 +179,12 @@ func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8)
} else {
bigen.PutUint64(e.b[:lim], v)
}
var i byte
if bincDoPrune {
i := pruneSignExt(e.b[:lim], pos)
e.e.encWr.writen1(bd | lim - 1 - byte(i))
e.e.encWr.writeb(e.b[i:lim])
} else {
e.e.encWr.writen1(bd | lim - 1)
e.e.encWr.writeb(e.b[:lim])
i = byte(pruneSignExt(e.b[:lim], pos))
}
e.e.encWr.writen1(bd | lim - 1 - i)
e.e.encWr.writeb(e.b[i:lim])
}
func (e *bincEncDriver) EncodeInt(v int64) {
@@ -349,7 +354,7 @@ func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) {
}
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
// NOTE: we currently only support UTF-8 (string) and RAW (bytearray).
// MARKER: we currently only support UTF-8 (string) and RAW (bytearray).
// We should consider supporting bincUnicodeOther.
if c == cRAW {
@@ -394,7 +399,7 @@ type bincDecDriver struct {
vd byte
vs byte
fnil bool
_ bool
// _ [3]byte // padding
// linear searching on this slice is ok,
// because we typically expect < 32 symbols in each stream.
@@ -417,30 +422,17 @@ func (d *bincDecDriver) readNextBd() {
d.bdRead = true
}
func (d *bincDecDriver) uncacheRead() {
if d.bdRead {
d.d.decRd.unreadn1()
d.bdRead = false
}
}
func (d *bincDecDriver) advanceNil() (null bool) {
d.fnil = false
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
d.fnil = true
null = true
}
return
}
func (d *bincDecDriver) Nil() bool {
return d.fnil
}
func (d *bincDecDriver) TryNil() bool {
return d.advanceNil()
}
@@ -449,11 +441,9 @@ func (d *bincDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
d.fnil = false
// if d.vd == bincVdSpecial && d.vs == bincSpNil {
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
d.fnil = true
return valueTypeNil
} else if d.vd == bincVdByteArray {
return valueTypeBytes
@@ -473,24 +463,20 @@ func (d *bincDecDriver) DecodeTime() (t time.Time) {
}
if d.vd != bincVdTimestamp {
d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
t, err := bincDecodeTime(d.d.decRd.readx(uint(d.vs)))
if err != nil {
panic(err)
}
halt.onerror(err)
d.bdRead = false
return
}
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
if vs&0x8 == 0 {
d.d.decRd.readb(d.b[0:defaultLen])
func (d *bincDecDriver) decFloatPre(xlen byte) {
if d.vs&0x8 == 0 {
d.d.decRd.readb(d.b[0:xlen])
} else {
l := d.d.decRd.readn1()
if l > 8 {
d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l)
return
}
for i := l; i < 8; i++ {
d.b[i] = 0
@@ -502,47 +488,51 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
func (d *bincDecDriver) decFloat() (f float64) {
//if true { f = math.Float64frombits(bigen.Uint64(d.d.decRd.readx(8))); break; }
if x := d.vs & 0x7; x == bincFlBin32 {
d.decFloatPre(d.vs, 4)
d.decFloatPre(4)
f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
} else if x == bincFlBin64 {
d.decFloatPre(d.vs, 8)
d.decFloatPre(8)
f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
} else {
d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s",
msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
d.d.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
}
return
}
func (d *bincDecDriver) decUint() (v uint64) {
// need to inline the code (interface conversion and type assertion expensive)
_, v = d.uintBytes()
return
}
func (d *bincDecDriver) uintBytes() (bs []byte, v uint64) {
switch d.vs {
case 0:
v = uint64(d.d.decRd.readn1())
d.b[0] = d.d.decRd.readn1()
v = uint64(d.b[0])
bs = d.b[:1]
case 1:
d.d.decRd.readb(d.b[6:8])
v = uint64(bigen.Uint16(d.b[6:8]))
bs = d.b[6:8]
d.d.decRd.readb(bs)
v = uint64(bigen.Uint16(bs))
case 2:
bs = d.b[5:8]
d.d.decRd.readb(bs)
d.b[4] = 0
d.d.decRd.readb(d.b[5:8])
v = uint64(bigen.Uint32(d.b[4:8]))
case 3:
d.d.decRd.readb(d.b[4:8])
v = uint64(bigen.Uint32(d.b[4:8]))
case 4, 5, 6:
bs = d.b[4:8]
d.d.decRd.readb(bs)
v = uint64(bigen.Uint32(bs))
case 4, 5, 6, 7:
lim := 7 - d.vs
d.d.decRd.readb(d.b[lim:8])
bs = d.b[lim:8]
d.d.decRd.readb(bs)
for i := uint8(0); i < lim; i++ {
d.b[i] = 0
}
v = uint64(bigen.Uint64(d.b[:8]))
case 7:
d.d.decRd.readb(d.b[:8])
v = uint64(bigen.Uint64(d.b[:8]))
default:
d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
return
d.d.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs)
}
return
}
@@ -563,13 +553,10 @@ func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
neg = true
ui = 1
} else {
d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s",
d.vd, d.vs, bincdesc(d.vd, d.vs))
return
d.d.errorf("integer decode has invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
}
} else {
d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
return
}
return
}
@@ -594,7 +581,6 @@ func (d *bincDecDriver) DecodeUint64() (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("assigning negative signed value to unsigned integer type")
return
}
d.bdRead = false
return
@@ -616,9 +602,7 @@ func (d *bincDecDriver) DecodeFloat64() (f float64) {
} else if vs == bincSpNegInf {
return math.Inf(-1)
} else {
d.d.errorf("float - invalid special value from descriptor %x-%x/%s",
d.vd, d.vs, bincdesc(d.vd, d.vs))
return
d.d.errorf("float - invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
}
} else if vd == bincVdFloat {
f = d.decFloat()
@@ -640,7 +624,6 @@ func (d *bincDecDriver) DecodeBool() (b bool) {
b = true
} else {
d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
d.bdRead = false
return
@@ -648,11 +631,10 @@ func (d *bincDecDriver) DecodeBool() (b bool) {
func (d *bincDecDriver) ReadMapStart() (length int) {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
if d.vd != bincVdMap {
d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
length = d.decLen()
d.bdRead = false
@@ -661,11 +643,10 @@ func (d *bincDecDriver) ReadMapStart() (length int) {
func (d *bincDecDriver) ReadArrayStart() (length int) {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
if d.vd != bincVdArray {
d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
length = d.decLen()
d.bdRead = false
@@ -706,7 +687,7 @@ func (d *bincDecDriver) decStringBytes(bs []byte, zerocopy bool) (bs2 []byte) {
slen = d.decLen()
if zerocopy {
if d.d.bytes {
bs2 = d.d.decRd.readx(uint(slen))
bs2 = d.d.decRd.rb.readx(uint(slen))
} else if len(bs) == 0 {
bs2 = decByteSlice(d.d.r(), slen, d.d.h.MaxInitLen, d.d.b[:])
} else {
@@ -757,7 +738,6 @@ func (d *bincDecDriver) decStringBytes(bs []byte, zerocopy bool) (bs2 []byte) {
}
default:
d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
d.bdRead = false
return
@@ -789,15 +769,13 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
clen = d.decLen()
} else {
d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
d.bdRead = false
if zerocopy {
if d.d.bytes {
return d.d.decRd.readx(uint(clen))
} else if len(bs) == 0 {
bs = d.d.b[:]
}
if d.d.bytes && (zerocopy || d.h.ZeroCopy) {
return d.d.decRd.rb.readx(uint(clen))
}
if zerocopy && len(bs) == 0 {
bs = d.d.b[:]
}
return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs)
}
@@ -805,7 +783,6 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) {
if xtag > 0xff {
d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
return
}
if d.advanceNil() {
return
@@ -829,19 +806,16 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b
xtag = d.d.decRd.readn1()
if verifyTag && xtag != tag {
d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag)
return
}
if d.d.bytes {
xbs = d.d.decRd.readx(uint(l))
xbs = d.d.decRd.rb.readx(uint(l))
} else {
xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
}
} else if d.vd == bincVdByteArray {
xbs = d.DecodeBytes(nil, true)
} else {
d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s",
msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
d.d.errorf("ext expects extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
}
d.bdRead = false
return
@@ -852,7 +826,6 @@ func (d *bincDecDriver) DecodeNaked() {
d.readNextBd()
}
d.fnil = false
n := d.d.naked()
var decodeFurther bool
@@ -861,7 +834,6 @@ func (d *bincDecDriver) DecodeNaked() {
switch d.vs {
case bincSpNil:
n.v = valueTypeNil
d.fnil = true
case bincSpFalse:
n.v = valueTypeBool
n.b = false
@@ -887,8 +859,7 @@ func (d *bincDecDriver) DecodeNaked() {
n.v = valueTypeInt
n.i = int64(-1) // int8(-1)
default:
d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s",
d.vd, d.vs, bincdesc(d.vd, d.vs))
d.d.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
}
case bincVdSmallInt:
n.v = valueTypeUint
@@ -902,27 +873,25 @@ func (d *bincDecDriver) DecodeNaked() {
case bincVdFloat:
n.v = valueTypeFloat
n.f = d.decFloat()
case bincVdSymbol:
n.v = valueTypeSymbol
n.s = string(d.DecodeStringAsBytes())
case bincVdString:
n.v = valueTypeString
n.s = string(d.DecodeStringAsBytes())
case bincVdByteArray:
decNakedReadRawBytes(d, &d.d, n, d.h.RawToString)
fauxUnionReadRawBytes(d, &d.d, n, d.h.RawToString)
case bincVdSymbol:
n.v = valueTypeSymbol
n.s = string(d.DecodeStringAsBytes())
case bincVdTimestamp:
n.v = valueTypeTime
tt, err := bincDecodeTime(d.d.decRd.readx(uint(d.vs)))
if err != nil {
panic(err)
}
halt.onerror(err)
n.t = tt
case bincVdCustomExt:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.d.decRd.readn1())
if d.d.bytes {
n.l = d.d.decRd.readx(uint(l))
n.l = d.d.decRd.rb.readx(uint(l))
} else {
n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
}
@@ -945,6 +914,121 @@ func (d *bincDecDriver) DecodeNaked() {
}
}
func (d *bincDecDriver) nextValueBytes(start []byte) (v []byte) {
if !d.bdRead {
d.readNextBd()
}
v = append(start, d.bd)
v = d.nextValueBytesBdReadR(v)
d.bdRead = false
return
}
func (d *bincDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
d.readNextBd()
v = append(v0, d.bd)
return d.nextValueBytesBdReadR(v)
}
func (d *bincDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
v = v0
fnLen := func(vs byte) uint {
var bs []byte
switch vs {
case 0:
x := d.d.decRd.readn1()
v = append(v, x)
return uint(x)
case 1:
bs = d.b[6:8]
d.d.decRd.readb(bs)
v = append(v, bs...)
return uint(bigen.Uint16(bs))
case 2:
bs = d.b[4:8]
d.d.decRd.readb(bs)
v = append(v, bs...)
return uint(bigen.Uint32(bs))
case 3:
bs = d.b[:8]
d.d.decRd.readb(bs)
v = append(v, bs...)
return uint(bigen.Uint64(bs))
default:
return uint(vs - 4)
}
}
var clen uint
switch d.vd {
case bincVdSpecial:
switch d.vs {
case bincSpNil, bincSpFalse, bincSpTrue, bincSpNan, bincSpPosInf: // pass
case bincSpNegInf, bincSpZeroFloat, bincSpZero, bincSpNegOne: // pass
default:
d.d.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
}
case bincVdSmallInt: // pass
case bincVdPosInt, bincVdNegInt:
bs, _ := d.uintBytes()
v = append(v, bs...)
case bincVdFloat:
fn := func(xlen byte) {
if d.vs&0x8 != 0 {
xlen = d.d.decRd.readn1()
v = append(v, xlen)
if xlen > 8 {
d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", xlen)
}
}
d.d.decRd.readb(d.b[:xlen])
v = append(v, d.b[:xlen]...)
}
switch d.vs & 0x7 {
case bincFlBin32:
fn(4)
case bincFlBin64:
fn(8)
default:
d.d.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
}
case bincVdString, bincVdByteArray:
clen = fnLen(d.vs)
v = append(v, d.d.decRd.readx(clen)...)
case bincVdSymbol:
if d.vs&0x8 == 0 {
v = append(v, d.d.decRd.readn1())
} else {
v = append(v, d.d.decRd.rb.readx(2)...)
}
if d.vs&0x4 != 0 {
clen = fnLen(d.vs & 0x3)
v = append(v, d.d.decRd.readx(clen)...)
}
case bincVdTimestamp:
v = append(v, d.d.decRd.readx(uint(d.vs))...)
case bincVdCustomExt:
clen = fnLen(d.vs)
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(clen)...)
case bincVdArray:
clen = fnLen(d.vs)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
}
case bincVdMap:
clen = fnLen(d.vs)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
}
default:
d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
}
return
}
//------------------------------------
//BincHandle is a Handle for the Binc Schema-Free Encoding Format
@@ -990,6 +1074,8 @@ type BincHandle struct {
// Name returns the name of the handle: binc
func (h *BincHandle) Name() string { return "binc" }
func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) }
func (h *BincHandle) newEncDriver() encDriver {
var e = &bincEncDriver{h: h}
e.e.e = e
@@ -1022,7 +1108,6 @@ func (e *bincEncDriver) atEndOfEncode() {
func (d *bincDecDriver) reset() {
d.s = nil
d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
d.fnil = false
}
func (d *bincDecDriver) atEndOfDecode() {
@@ -1193,27 +1278,5 @@ func bincDecodeTime(bs []byte) (tt time.Time, err error) {
return
}
// func timeLocUTCName(tzint int16) string {
// if tzint == 0 {
// return "UTC"
// }
// var tzname = []byte("UTC+00:00")
// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below.
// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
// var tzhr, tzmin int16
// if tzint < 0 {
// tzname[3] = '-'
// tzhr, tzmin = -tzint/60, (-tzint)%60
// } else {
// tzhr, tzmin = tzint/60, tzint%60
// }
// tzname[4] = timeDigits[tzhr/10]
// tzname[5] = timeDigits[tzhr%10]
// tzname[7] = timeDigits[tzmin/10]
// tzname[8] = timeDigits[tzmin%10]
// return string(tzname)
// //return time.FixedZone(string(tzname), int(tzint)*60)
// }
var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil)


@@ -6,23 +6,32 @@
_tests() {
local vet="" # TODO: make it off
local gover=$( go version | cut -f 3 -d ' ' )
case $gover in
go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;;
*) return 1
esac
# note that codecgen requires fastpath, so you cannot do "codecgen notfastpath"
local a=( "" "safe" "notfastpath" "notfastpath safe" "codecgen" "codecgen safe" )
# we test the following permutations: fastpath/unsafe, !fastpath/!unsafe, codecgen/unsafe
## local a=( "" "safe" "notfastpath safe" "codecgen" )
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/!unsafe), (codecgen/unsafe)"
local a=( "" "notfastpath safe" "codecgen" )
local b=()
for i in "${a[@]}"
do
echo ">>>> TAGS: $i"
local i2=${i:-default}
case $gover in
go1.[0-6]*) go test ${zargs[*]} -tags "$i" "$@" ;;
*) go vet -printfuncs "errorf" "$@" &&
go test ${zargs[*]} -vet "$vet" -tags "alltests $i" -run "Suite" -coverprofile "${i2// /-}.cov.out" "$@" ;;
esac
if [[ "$?" != 0 ]]; then return 1; fi
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: '$i'"
true &&
go vet -printfuncs "errorf" "$@" &&
go test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" \
-run "TestCodecSuite" -coverprofile "${i2// /-}.cov.out" "$@" &
b+=("${i2// /-}.cov.out")
[[ "$zwait" == "1" ]] && wait
# if [[ "$?" != 0 ]]; then return 1; fi
done
echo "++++++++ TEST SUITES ALL PASSED ++++++++"
wait
[[ "$zcover" == "1" ]] && command -v gocovmerge && gocovmerge "${b[@]}" > __merge.cov.out && go tool cover -html=__merge.cov.out
}
# is a generation needed?
_ng() {
local a="$1"
@@ -59,7 +68,7 @@ _build() {
cat > gen.generated.go <<EOF
// +build codecgen.exec
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -86,10 +95,8 @@ EOF
EOF
cat > gen-from-tmpl.codec.generated.go <<EOF
package codec
import "io"
func GenInternalGoFile(r io.Reader, w io.Writer) error {
return genInternalGoFile(r, w)
}
func GenRunTmpl2Go(in, out string) { genRunTmpl2Go(in, out) }
func GenRunSortTmpl2Go(in, out string) { genRunSortTmpl2Go(in, out) }
EOF
cat > gen-from-tmpl.generated.go <<EOF
//+build ignore
@@ -97,26 +104,13 @@ EOF
package main
import "${zpkg}"
import "os"
func run(fnameIn, fnameOut string) {
println("____ " + fnameIn + " --> " + fnameOut + " ______")
fin, err := os.Open(fnameIn)
if err != nil { panic(err) }
defer fin.Close()
fout, err := os.Create(fnameOut)
if err != nil { panic(err) }
defer fout.Close()
err = codec.GenInternalGoFile(fin, fout)
if err != nil { panic(err) }
}
func main() {
run("fast-path.go.tmpl", "fast-path.generated.go")
run("gen-helper.go.tmpl", "gen-helper.generated.go")
run("mammoth-test.go.tmpl", "mammoth_generated_test.go")
run("mammoth2-test.go.tmpl", "mammoth2_generated_test.go")
// run("sort-slice.go.tmpl", "sort-slice.generated.go")
codec.GenRunTmpl2Go("fast-path.go.tmpl", "fast-path.generated.go")
codec.GenRunTmpl2Go("gen-helper.go.tmpl", "gen-helper.generated.go")
codec.GenRunTmpl2Go("mammoth-test.go.tmpl", "mammoth_generated_test.go")
codec.GenRunTmpl2Go("mammoth2-test.go.tmpl", "mammoth2_generated_test.go")
codec.GenRunSortTmpl2Go("sort-slice.go.tmpl", "sort-slice.generated.go")
}
EOF
@@ -124,7 +118,6 @@ EOF
shared_test.go > bench/shared_test.go
# explicitly return 0 if this passes, else return 1
go run -tags "prebuild" prebuild.go || return 1
go run -tags "notfastpath safe codecgen.exec" gen-from-tmpl.generated.go || return 1
rm -f gen-from-tmpl.*generated.go
return 0
@@ -225,8 +218,8 @@ EOF
_usage() {
cat <<EOF
primary usage: $0
-[tmpfxnld] -> [tests, make, prebuild (force) (external), inlining diagnostics, mid-stack inlining, race detector]
-v -> verbose
-[tosw m pf n l d] -> [t=tests (o=cover, s=short, w=wait), m=make, p=prebuild (f=force), n=inlining diagnostics, l=mid-stack inlining, d=race detector]
-v -> v=verbose
EOF
if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi
}
@@ -235,14 +228,20 @@ _main() {
if [[ -z "$1" ]]; then _usage; return 1; fi
local x
local zforce
local zcover
local zwait
local ztestargs=()
local zargs=()
local zverbose=()
local zbenchflags=""
OPTIND=1
while getopts ":ctmnrgpfvlyzdb:" flag
while getopts ":ctmnrgpfvlyzdsowb:" flag
do
case "x$flag" in
'xo') zcover=1 ;;
'xw') zwait=1 ;;
'xf') zforce=1 ;;
'xs') ztestargs+=("-short") ;;
'xv') zverbose+=(1) ;;
'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;;
'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;;


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -69,45 +69,48 @@ const (
// cborSelfDesrTag3 byte = 0xf7
// )
func cbordesc(bd byte) string {
switch bd >> 5 {
case cborMajorUint:
return "(u)int"
case cborMajorNegInt:
return "int"
case cborMajorBytes:
return "bytes"
case cborMajorString:
return "string"
case cborMajorArray:
return "array"
case cborMajorMap:
return "map"
case cborMajorTag:
return "tag"
case cborMajorSimpleOrFloat: // default
switch bd {
case cborBdNil:
return "nil"
case cborBdFalse:
return "false"
case cborBdTrue:
return "true"
case cborBdFloat16, cborBdFloat32, cborBdFloat64:
return "float"
case cborBdIndefiniteBytes:
return "bytes*"
case cborBdIndefiniteString:
return "string*"
case cborBdIndefiniteArray:
return "array*"
case cborBdIndefiniteMap:
return "map*"
default:
return "unknown(simple)"
var (
cbordescSimpleNames = map[byte]string{
cborBdNil: "nil",
cborBdFalse: "false",
cborBdTrue: "true",
cborBdFloat16: "float",
cborBdFloat32: "float",
cborBdFloat64: "float",
cborBdBreak: "break",
}
cbordescIndefNames = map[byte]string{
cborBdIndefiniteBytes: "bytes*",
cborBdIndefiniteString: "string*",
cborBdIndefiniteArray: "array*",
cborBdIndefiniteMap: "map*",
}
cbordescMajorNames = map[byte]string{
cborMajorUint: "(u)int",
cborMajorNegInt: "int",
cborMajorBytes: "bytes",
cborMajorString: "string",
cborMajorArray: "array",
cborMajorMap: "map",
cborMajorTag: "tag",
cborMajorSimpleOrFloat: "simple",
}
)
func cbordesc(bd byte) (s string) {
bm := bd >> 5
if bm == cborMajorSimpleOrFloat {
s = cbordescSimpleNames[bd]
} else {
s = cbordescMajorNames[bm]
if s == "" {
s = cbordescIndefNames[bd]
}
}
return "unknown"
if s == "" {
s = "unknown"
}
return
}
// -------------------
@@ -138,11 +141,26 @@ func (e *cborEncDriver) EncodeBool(b bool) {
}
func (e *cborEncDriver) EncodeFloat32(f float32) {
b := math.Float32bits(f)
if e.h.OptimumSize {
if h := floatToHalfFloatBits(b); halfFloatToFloatBits(h) == b {
// fmt.Printf("no 32-16 overflow: %v\n", f)
e.e.encWr.writen1(cborBdFloat16)
bigenHelper{e.x[:2], e.e.w()}.writeUint16(h)
return
}
}
e.e.encWr.writen1(cborBdFloat32)
bigenHelper{e.x[:4], e.e.w()}.writeUint32(math.Float32bits(f))
bigenHelper{e.x[:4], e.e.w()}.writeUint32(b)
}
func (e *cborEncDriver) EncodeFloat64(f float64) {
if e.h.OptimumSize {
if f32 := float32(f); float64(f32) == f {
e.EncodeFloat32(f32)
return
}
}
e.e.encWr.writen1(cborBdFloat64)
bigenHelper{e.x[:8], e.e.w()}.writeUint64(math.Float64bits(f))
}
@@ -305,7 +323,7 @@ type cborDecDriver struct {
bdRead bool
bd byte
st bool // skip tags
fnil bool // found nil
_ bool // found nil
noBuiltInTypes
_ [6]uint64 // padding cache-aligned
d Decoder
@@ -321,13 +339,11 @@ func (d *cborDecDriver) readNextBd() {
}
func (d *cborDecDriver) advanceNil() (null bool) {
d.fnil = false
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
d.fnil = true
null = true
}
return
@@ -347,15 +363,7 @@ func (d *cborDecDriver) skipTags() {
}
}
func (d *cborDecDriver) uncacheRead() {
if d.bdRead {
d.d.decRd.unreadn1()
d.bdRead = false
}
}
func (d *cborDecDriver) ContainerType() (vt valueType) {
d.fnil = false
if !d.bdRead {
d.readNextBd()
}
@@ -364,24 +372,21 @@ func (d *cborDecDriver) ContainerType() (vt valueType) {
}
if d.bd == cborBdNil {
d.bdRead = false // always consume nil after seeing it in container type
d.fnil = true
return valueTypeNil
} else if d.bd == cborBdIndefiniteBytes || (d.bd>>5 == cborMajorBytes) {
}
major := d.bd >> 5
if major == cborMajorBytes {
return valueTypeBytes
} else if d.bd == cborBdIndefiniteString || (d.bd>>5 == cborMajorString) {
} else if major == cborMajorString {
return valueTypeString
} else if d.bd == cborBdIndefiniteArray || (d.bd>>5 == cborMajorArray) {
} else if major == cborMajorArray {
return valueTypeArray
} else if d.bd == cborBdIndefiniteMap || (d.bd>>5 == cborMajorMap) {
} else if major == cborMajorMap {
return valueTypeMap
}
return valueTypeUnset
}
func (d *cborDecDriver) Nil() bool {
return d.fnil
}
func (d *cborDecDriver) TryNil() bool {
return d.advanceNil()
}
@@ -401,19 +406,16 @@ func (d *cborDecDriver) decUint() (ui uint64) {
v := d.bd & 0x1f
if v <= 0x17 {
ui = uint64(v)
} else if v == 0x18 {
ui = uint64(d.d.decRd.readn1())
} else if v == 0x19 {
ui = uint64(bigen.Uint16(d.d.decRd.readx(2)))
} else if v == 0x1a {
ui = uint64(bigen.Uint32(d.d.decRd.readx(4)))
} else if v == 0x1b {
ui = uint64(bigen.Uint64(d.d.decRd.readx(8)))
} else {
if v == 0x18 {
ui = uint64(d.d.decRd.readn1())
} else if v == 0x19 {
ui = uint64(bigen.Uint16(d.d.decRd.readx(2)))
} else if v == 0x1a {
ui = uint64(bigen.Uint32(d.d.decRd.readx(4)))
} else if v == 0x1b {
ui = uint64(bigen.Uint64(d.d.decRd.readx(8)))
} else {
d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
return
}
d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
}
return
}
@@ -427,8 +429,8 @@ func (d *cborDecDriver) decCheckInteger() (neg bool) {
} else if major == cborMajorNegInt {
neg = true
} else {
d.d.errorf("invalid integer; got major %v from descriptor %x/%s, expected %v or %v",
major, d.bd, cbordesc(d.bd), cborMajorUint, cborMajorNegInt)
d.d.errorf("invalid integer %x (%s); got major %v, expected %v or %v",
d.bd, cbordesc(d.bd), major, cborMajorUint, cborMajorNegInt)
}
return
}
@@ -451,8 +453,8 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
d.bdRead = false
for !d.CheckBreak() {
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorString {
d.d.errorf("invalid indefinite string/bytes; got major %v, expected %x/%s",
major, d.bd, cbordesc(d.bd))
d.d.errorf("invalid indefinite string/bytes %x (%s); got major %v, expected %v or %v",
d.bd, cbordesc(d.bd), major, cborMajorBytes, cborMajorString)
}
n := uint(d.decLen())
oldLen := uint(len(bs))
@@ -515,8 +517,7 @@ func (d *cborDecDriver) DecodeFloat64() (f float64) {
} else if major == cborMajorNegInt {
f = float64(cborDecInt64(d.decUint(), true))
} else {
d.d.errorf("invalid float descriptor; got %d/%s, expected float16/32/64 or (-)int",
d.bd, cbordesc(d.bd))
d.d.errorf("invalid float descriptor; got %d/%s, expected float16/32/64 or (-)int", d.bd, cbordesc(d.bd))
}
}
d.bdRead = false
@@ -536,7 +537,6 @@ func (d *cborDecDriver) DecodeBool() (b bool) {
} else if d.bd == cborBdFalse {
} else {
d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
return
}
d.bdRead = false
return
@@ -544,36 +544,34 @@ func (d *cborDecDriver) DecodeBool() (b bool) {
func (d *cborDecDriver) ReadMapStart() (length int) {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
if d.st {
d.skipTags()
}
d.bdRead = false
if d.bd == cborBdIndefiniteMap {
return decContainerLenUnknown
return containerLenUnknown
}
if d.bd>>5 != cborMajorMap {
d.d.errorf("error reading map; got major type: %x, expected %x/%s",
d.bd>>5, cborMajorMap, cbordesc(d.bd))
d.d.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd))
}
return d.decLen()
}
func (d *cborDecDriver) ReadArrayStart() (length int) {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
if d.st {
d.skipTags()
}
d.bdRead = false
if d.bd == cborBdIndefiniteArray {
return decContainerLenUnknown
return containerLenUnknown
}
if d.bd>>5 != cborMajorArray {
d.d.errorf("invalid array; got major type: %x, expect: %x/%s",
d.bd>>5, cborMajorArray, cbordesc(d.bd))
d.d.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, cbordesc(d.bd))
}
return d.decLen()
}
@@ -624,12 +622,11 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.d.bytes {
return d.d.decRd.readx(uint(clen))
} else if len(bs) == 0 {
bs = d.d.b[:]
}
if d.d.bytes && (zerocopy || d.h.ZeroCopy) {
return d.d.decRd.rb.readx(uint(clen))
}
if zerocopy && len(bs) == 0 {
bs = d.d.b[:]
}
return decByteSlice(d.d.r(), clen, d.h.MaxInitLen, bs)
}
@@ -655,7 +652,7 @@ func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
case 0:
var err error
if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
d.d.errorv(err)
d.d.onerror(err)
}
case 1:
f1, f2 := math.Modf(d.DecodeFloat64())
@@ -682,7 +679,6 @@ func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) {
d.d.decode(&re.Value)
} else if xtag != realxtag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
return
} else if ext == SelfExt {
rv2 := baseRV(rv)
d.d.decodeValue(rv2, d.h.fnNoExt(rv2.Type()))
@@ -697,7 +693,6 @@ func (d *cborDecDriver) DecodeNaked() {
d.readNextBd()
}
d.fnil = false
n := d.d.naked()
var decodeFurther bool
@@ -714,7 +709,7 @@ func (d *cborDecDriver) DecodeNaked() {
n.v = valueTypeInt
n.i = d.DecodeInt64()
case cborMajorBytes:
decNakedReadRawBytes(d, &d.d, n, d.h.RawToString)
fauxUnionReadRawBytes(d, &d.d, n, d.h.RawToString)
case cborMajorString:
n.v = valueTypeString
n.s = string(d.DecodeStringAsBytes())
@@ -742,7 +737,6 @@ func (d *cborDecDriver) DecodeNaked() {
switch d.bd {
case cborBdNil, cborBdUndefined:
n.v = valueTypeNil
d.fnil = true
case cborBdFalse:
n.v = valueTypeBool
n.b = false
@@ -752,17 +746,6 @@ func (d *cborDecDriver) DecodeNaked() {
case cborBdFloat16, cborBdFloat32, cborBdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat64()
case cborBdIndefiniteBytes:
decNakedReadRawBytes(d, &d.d, n, d.h.RawToString)
case cborBdIndefiniteString:
n.v = valueTypeString
n.s = string(d.DecodeStringAsBytes())
case cborBdIndefiniteArray:
n.v = valueTypeArray
decodeFurther = true
case cborBdIndefiniteMap:
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
}
@@ -774,6 +757,129 @@ func (d *cborDecDriver) DecodeNaked() {
}
}
func (d *cborDecDriver) uintBytes() (v []byte, ui uint64) {
switch vv := d.bd & 0x1f; vv {
case 0x18:
v = d.d.decRd.readx(1)
ui = uint64(v[0])
case 0x19:
v = d.d.decRd.readx(2)
ui = uint64(bigen.Uint16(v))
case 0x1a:
v = d.d.decRd.readx(4)
ui = uint64(bigen.Uint32(v))
case 0x1b:
v = d.d.decRd.readx(8)
ui = uint64(bigen.Uint64(v))
default:
if vv > 0x1b {
d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
}
ui = uint64(vv)
}
return
}
func (d *cborDecDriver) nextValueBytes(start []byte) (v []byte) {
if !d.bdRead {
d.readNextBd()
}
v = append(start, d.bd)
v = d.nextValueBytesBdReadR(v)
d.bdRead = false
return
}
func (d *cborDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
d.readNextBd()
v = append(v0, d.bd)
return d.nextValueBytesBdReadR(v)
}
func (d *cborDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
v = v0
var bs []byte
var ui uint64
switch d.bd >> 5 {
case cborMajorUint, cborMajorNegInt:
bs, _ = d.uintBytes()
v = append(v, bs...)
case cborMajorString, cborMajorBytes:
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
for {
d.readNextBd()
v = append(v, d.bd)
if d.bd == cborBdBreak {
break
}
bs, ui = d.uintBytes()
v = append(v, bs...)
v = append(v, d.d.decRd.readx(uint(ui))...)
}
} else {
bs, ui = d.uintBytes()
v = append(v, bs...)
v = append(v, d.d.decRd.readx(uint(ui))...)
}
case cborMajorArray:
if d.bd == cborBdIndefiniteArray {
for {
d.readNextBd()
v = append(v, d.bd)
if d.bd == cborBdBreak {
break
}
v = d.nextValueBytesBdReadR(v)
}
} else {
bs, ui = d.uintBytes()
v = append(v, bs...)
for i := uint64(0); i < ui; i++ {
v = d.nextValueBytesR(v)
}
}
case cborMajorMap:
if d.bd == cborBdIndefiniteMap {
for {
d.readNextBd()
v = append(v, d.bd)
if d.bd == cborBdBreak {
break
}
v = d.nextValueBytesBdReadR(v)
v = d.nextValueBytesR(v)
}
} else {
bs, ui = d.uintBytes()
v = append(v, bs...)
for i := uint64(0); i < ui; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
}
}
case cborMajorTag:
bs, _ = d.uintBytes()
v = append(v, bs...)
v = d.nextValueBytesR(v)
case cborMajorSimpleOrFloat:
switch d.bd {
case cborBdNil, cborBdUndefined, cborBdFalse, cborBdTrue: // pass
case cborBdFloat16:
v = append(v, d.d.decRd.readx(2)...)
case cborBdFloat32:
v = append(v, d.d.decRd.readx(4)...)
case cborBdFloat64:
v = append(v, d.d.decRd.readx(8)...)
default:
d.d.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd)
}
default: // should never happen
d.d.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd)
}
return
}
// -------------------------
// CborHandle is a Handle for the CBOR encoding format,
@@ -815,6 +921,8 @@ type CborHandle struct {
// Name returns the name of the handle: cbor
func (h *CborHandle) Name() string { return "cbor" }
func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) }
func (h *CborHandle) newEncDriver() encDriver {
var e = &cborEncDriver{h: h}
e.e.e = e
@@ -838,7 +946,6 @@ func (e *cborEncDriver) reset() {
func (d *cborDecDriver) reset() {
d.bd = 0
d.bdRead = false
d.fnil = false
d.st = d.h.SkipUnexpectedTags
}

461
vendor/github.com/ugorji/go/codec/decimal.go generated vendored Normal file

@@ -0,0 +1,461 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"strconv"
)
func parseFloat32(b []byte) (f float32, err error) {
return parseFloat32_custom(b)
// return parseFloat32_strconv(b)
}
func parseFloat64(b []byte) (f float64, err error) {
return parseFloat64_custom(b)
// return parseFloat64_strconv(b)
}
func parseFloat32_strconv(b []byte) (f float32, err error) {
f64, err := strconv.ParseFloat(stringView(b), 32)
f = float32(f64)
return
}
func parseFloat64_strconv(b []byte) (f float64, err error) {
return strconv.ParseFloat(stringView(b), 64)
}
// ------ parseFloat custom below --------
// We assume that a lot of floating point numbers in json files will be
// those that are handwritten, and with defined precision (in terms of number
// of digits after decimal point), etc.
//
// We further assume that these can be written in an exact format.
//
// strconv.ParseFloat has some unnecessary overhead which we can do without
// for the common case:
//
// - expensive char-by-char check to see if underscores are in right place
// - testing for and skipping underscores
// - check if the string matches ignorecase +/- inf, +/- infinity, nan
// - support for base 16 (0xFFFF...)
//
// The functions below will try a fast-path for floats which can be decoded
// without any loss of precision, meaning they:
//
// - fit within the significand bits of the 32-bit or 64-bit float
// - the exponent fits within the supported exponent range
// - there is no truncation (any extra digits are all trailing zeros)
//
// To figure out what the values are for maxMantDigits, use this idea below:
//
// 2^23 = 838 8608 (between 10^ 6 and 10^ 7) (significand bits of uint32)
// 2^32 = 42 9496 7296 (between 10^ 9 and 10^10) (full uint32)
// 2^52 = 4503 5996 2737 0496 (between 10^15 and 10^16) (significand bits of uint64)
// 2^64 = 1844 6744 0737 0955 1616 (between 10^19 and 10^20) (full uint64)
//
// Note: we only allow for up to what can comfortably fit into the significand
// ignoring the exponent, and we only try to parse iff significand fits.
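To make the fast path concrete, here is a simplified, illustrative sketch (separate from the vendored code, and deliberately stricter than the real bounds) of how a mantissa/exponent pair read from the digits is turned back into a float with a single power-of-ten multiply or divide:

```go
// exactFloat64 mirrors the idea behind readFloat + parseFloat64_reader:
// "123.45" is read as mantissa=12345, exp=-2, and reconstructed with one
// power-of-ten divide. It gives up (ok=false) whenever exactness is not
// guaranteed, in which case the caller falls back to strconv.ParseFloat.
func exactFloat64(mantissa uint64, exp int, neg bool) (f float64, ok bool) {
	var pow10 = [...]float64{1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10,
		1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22}
	if mantissa >= 1<<52 || exp < -22 || exp > 22 { // outside the exactly-representable window
		return 0, false
	}
	f = float64(mantissa)
	if exp < 0 {
		f /= pow10[-exp]
	} else {
		f *= pow10[exp]
	}
	if neg {
		f = -f
	}
	return f, true
}
```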
const (
thousand = 1000
million = thousand * thousand
billion = thousand * million
trillion = thousand * billion
quadrillion = thousand * trillion
quintillion = thousand * quadrillion
)
// Exact powers of 10.
var uint64pow10 = [...]uint64{
1, 10, 100,
1 * thousand, 10 * thousand, 100 * thousand,
1 * million, 10 * million, 100 * million,
1 * billion, 10 * billion, 100 * billion,
1 * trillion, 10 * trillion, 100 * trillion,
1 * quadrillion, 10 * quadrillion, 100 * quadrillion,
1 * quintillion, 10 * quintillion,
}
var float64pow10 = [...]float64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22,
}
var float32pow10 = [...]float32{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10,
}
type floatinfo struct {
mantbits uint8
// expbits uint8 // (unused)
// bias int16 // (unused)
is32bit bool
exactPow10 int8 // Exact powers of ten are <= 10^N (32: 10, 64: 22)
exactInts int8 // Exact integers are <= 10^N (for non-float, set to 0)
// maxMantDigits int8 // 10^19 fits in uint64, while 10^9 fits in uint32
mantCutoffIsUint64Cutoff bool
mantCutoff uint64
}
// var fi32 = floatinfo{23, 8, -127, 10, 7, 9, fUint32Cutoff}
// var fi64 = floatinfo{52, 11, -1023, 22, 15, 19, fUint64Cutoff}
// var fi64u = floatinfo{64, 0, -1023, 19, 0, 19, fUint64Cutoff}
// var fi64i = floatinfo{63, 0, -1023, 19, 0, 19, fUint64Cutoff}
var fi32 = floatinfo{23, true, 10, 7, false, 1<<23 - 1}
var fi64 = floatinfo{52, false, 22, 15, false, 1<<52 - 1}
var fi64u = floatinfo{0, false, 19, 0, true, fUint64Cutoff}
var fi64i = floatinfo{0, false, 19, 0, true, fUint64Cutoff}
const fMaxMultiplierForExactPow10_64 = 1e15
const fMaxMultiplierForExactPow10_32 = 1e7
const fUint64Cutoff = (1<<64-1)/10 + 1
const fUint32Cutoff = (1<<32-1)/10 + 1
const fBase = 10
func strconvParseErr(b []byte, fn string) error {
return &strconv.NumError{
Func: fn,
Err: strconv.ErrSyntax,
Num: string(b),
}
}
func parseFloat32_reader(r readFloatResult) (f float32, fail bool) {
// parseFloatDebug(b, 32, false, exp, trunc, ok)
f = float32(r.mantissa)
// xdebugf("parsefloat32: exp: %v, mantissa: %v", r.exp, r.mantissa)
if r.exp == 0 {
} else if r.exp < 0 { // int / 10^k
f /= float32pow10[uint8(-r.exp)]
} else { // exp > 0
if r.exp > fi32.exactPow10 {
f *= float32pow10[r.exp-fi32.exactPow10]
if f > fMaxMultiplierForExactPow10_32 { // exponent too large - outside range
fail = true
return // ok = false
}
f *= float32pow10[fi32.exactPow10]
} else {
f *= float32pow10[uint8(r.exp)]
}
}
if r.neg {
f = -f
}
return
}
func parseFloat32_custom(b []byte) (f float32, err error) {
r := readFloat(b, fi32)
// xdebug2f("\tparsing: %s - ok: %v, bad: %v, trunc: %v, mantissa: %v, exp: %v", b, r.ok, r.bad, r.trunc, r.mantissa, r.exp)
if r.bad {
return 0, strconvParseErr(b, "ParseFloat")
}
if r.ok {
f, r.bad = parseFloat32_reader(r)
if !r.bad {
return
}
}
return parseFloat32_strconv(b)
}
func parseFloat64_reader(r readFloatResult) (f float64, fail bool) {
f = float64(r.mantissa)
if r.exp == 0 {
} else if r.exp < 0 { // int / 10^k
f /= float64pow10[-uint8(r.exp)]
} else { // exp > 0
if r.exp > fi64.exactPow10 {
f *= float64pow10[r.exp-fi64.exactPow10]
if f > fMaxMultiplierForExactPow10_64 { // exponent too large - outside range
fail = true
return
}
f *= float64pow10[fi64.exactPow10]
} else {
f *= float64pow10[uint8(r.exp)]
}
}
if r.neg {
f = -f
}
return
}
func parseFloat64_custom(b []byte) (f float64, err error) {
r := readFloat(b, fi64)
if r.bad {
return 0, strconvParseErr(b, "ParseFloat")
}
if r.ok {
f, r.bad = parseFloat64_reader(r)
if !r.bad {
return
}
}
return parseFloat64_strconv(b)
}
func parseUint64_simple(b []byte) (n uint64, ok bool) {
var i int
var n1 uint64
var c uint8
LOOP:
if i < len(b) {
c = b[i]
// unsigned integers don't overflow well on multiplication, so check cutoff here
// e.g. (maxUint64-5)*10 doesn't overflow well ...
// if n >= fUint64Cutoff || !isDigitChar(b[i]) { // if c < '0' || c > '9' {
if n >= fUint64Cutoff || c < '0' || c > '9' {
return
} else if c == '0' {
n *= fBase
} else {
n1 = n
n = n*fBase + uint64(c-'0')
if n < n1 {
return
}
}
i++
goto LOOP
}
ok = true
return
}
func parseUint64_reader(r readFloatResult) (f uint64, fail bool) {
f = r.mantissa
if r.exp == 0 {
} else if r.exp < 0 { // int / 10^k
if f%uint64pow10[uint8(-r.exp)] != 0 {
fail = true
} else {
f /= uint64pow10[uint8(-r.exp)]
}
} else { // exp > 0
f *= uint64pow10[uint8(r.exp)]
}
return
}
func parseInt64_reader(r readFloatResult) (v int64, fail bool) {
if r.exp == 0 {
} else if r.exp < 0 { // int / 10^k
if r.mantissa%uint64pow10[uint8(-r.exp)] != 0 {
// fail = true
return 0, true
}
r.mantissa /= uint64pow10[uint8(-r.exp)]
} else { // exp > 0
r.mantissa *= uint64pow10[uint8(r.exp)]
}
if chkOvf.Uint2Int(r.mantissa, r.neg) {
fail = true
} else if r.neg {
v = -int64(r.mantissa)
} else {
v = int64(r.mantissa)
}
return
}
// parseNumber will return an integer if only composed of [-]?[0-9]+
// Else it will return a float.
func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) {
var ok, neg bool
var f uint64
// var b1 []byte
// if b[0] == '-' {
// neg = true
// b1 = b[1:]
// } else {
// b1 = b
// }
// f, ok = parseUint64_simple(b1)
if len(b) == 0 {
return
}
if b[0] == '-' {
neg = true
f, ok = parseUint64_simple(b[1:])
} else {
f, ok = parseUint64_simple(b)
}
if ok {
if neg {
z.v = valueTypeInt
if chkOvf.Uint2Int(f, neg) {
return strconvParseErr(b, "ParseInt")
}
z.i = -int64(f)
} else if preferSignedInt {
z.v = valueTypeInt
if chkOvf.Uint2Int(f, neg) {
return strconvParseErr(b, "ParseInt")
}
z.i = int64(f)
} else {
z.v = valueTypeUint
z.u = f
}
return
}
z.v = valueTypeFloat
z.f, err = parseFloat64_custom(b)
return
}
type readFloatResult struct {
mantissa uint64
exp int8
neg, sawdot, sawexp, trunc, bad bool
ok bool
_ byte // padding
}
func readFloat(s []byte, y floatinfo) (r readFloatResult) {
var i uint // uint, so that we eliminate bounds checking
var slen = uint(len(s))
if slen == 0 {
// read an empty string as the zero value
// r.bad = true
r.ok = true
return
}
if s[0] == '-' {
r.neg = true
i++
}
// we considered punting early if string has length > maxMantDigits, but this doesn't account
// for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20
var nd, ndMant, dp int8
var xu uint64
LOOP:
for ; i < slen; i++ {
switch s[i] {
case '.':
if r.sawdot {
r.bad = true
return
}
r.sawdot = true
dp = nd
case 'e', 'E':
r.sawexp = true
break LOOP
case '0':
if nd == 0 {
dp--
continue LOOP
}
nd++
if r.mantissa < y.mantCutoff {
r.mantissa *= fBase
ndMant++
}
case '1', '2', '3', '4', '5', '6', '7', '8', '9':
nd++
if y.mantCutoffIsUint64Cutoff && r.mantissa < fUint64Cutoff {
r.mantissa *= fBase
xu = r.mantissa + uint64(s[i]-'0')
if xu < r.mantissa {
r.trunc = true
return
}
r.mantissa = xu
} else if r.mantissa < y.mantCutoff {
// mantissa = (mantissa << 1) + (mantissa << 3) + uint64(c-'0')
r.mantissa = r.mantissa*fBase + uint64(s[i]-'0')
} else {
r.trunc = true
return
}
ndMant++
default:
r.bad = true
return
}
}
if !r.sawdot {
dp = nd
}
if r.sawexp {
i++
if i < slen {
var eneg bool
if s[i] == '+' {
i++
} else if s[i] == '-' {
i++
eneg = true
}
if i < slen {
// for exact match, exponent is 1 or 2 digits (float64: -22 to 37, float32: -1 to 17).
// exit quick if exponent is more than 2 digits.
if i+2 < slen {
return
}
var e int8
if s[i] < '0' || s[i] > '9' { // !isDigitChar(s[i]) { //
r.bad = true
return
}
e = int8(s[i] - '0')
i++
if i < slen {
if s[i] < '0' || s[i] > '9' { // !isDigitChar(s[i]) { //
r.bad = true
return
}
e = e*fBase + int8(s[i]-'0') // (e << 1) + (e << 3) + int8(s[i]-'0')
i++
}
if eneg {
dp -= e
} else {
dp += e
}
}
}
}
if r.mantissa != 0 {
r.exp = dp - ndMant
// do not set ok=true for cases we cannot handle
if r.exp < -y.exactPow10 ||
r.exp > y.exactInts+y.exactPow10 ||
(y.mantbits != 0 && r.mantissa>>y.mantbits != 0) {
return
}
}
r.ok = true
return
}


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"math"
"reflect"
"strconv"
"time"
@@ -26,15 +25,6 @@ const (
decDefChanCap = 64 // should be large, as cap cannot be expanded
decScratchByteArrayLen = (6 * 8) // ??? cacheLineSize +
// decContainerLenUnknown is length returned from Read(Map|Array)Len
// when a format doesn't know a priori.
// For example, json doesn't pre-determine the length of a container (sequence/map).
decContainerLenUnknown = -1
// decContainerLenNil is length returned from Read(Map|Array)Len
// when a 'nil' was encountered in the stream.
decContainerLenNil = math.MinInt32
// decFailNonEmptyIntf configures whether we error
// when decoding naked into a non-empty interface.
//
@@ -50,8 +40,8 @@ const (
)
var (
errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct"
errstrCannotDecodeIntoNil = "cannot decode into nil"
errOnlyMapOrArrayCanDecodeIntoStruct = errors.New("only encoded map or array can be decoded into a struct")
errCannotDecodeIntoNil = errors.New("cannot decode into nil")
// errmsgExpandSliceOverflow = "expand slice: slice overflow"
errmsgExpandSliceCannotChange = "expand slice: cannot change"
@@ -63,7 +53,7 @@ var (
errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown")
errMaxDepthExceeded = errors.New("maximum decoding depth exceeded")
errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read")
// errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read")
)
type decDriver interface {
@@ -82,7 +72,7 @@ type decDriver interface {
// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
// For maps and arrays, it will not do the decoding in-band, but will signal
// the decoder, so that is done later, by setting the decNaked.valueType field.
// the decoder, so that is done later, by setting the fauxUnion.valueType field.
//
// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
// for extensions, DecodeNaked must read the tag and the []byte if it exists.
@@ -119,20 +109,21 @@ type decDriver interface {
DecodeTime() (t time.Time)
// ReadArrayStart will return the length of the array.
// If the format doesn't prefix the length, it returns decContainerLenUnknown.
// If the expected array was a nil in the stream, it returns decContainerLenNil.
// If the format doesn't prefix the length, it returns containerLenUnknown.
// If the expected array was a nil in the stream, it returns containerLenNil.
ReadArrayStart() int
ReadArrayEnd()
// ReadMapStart will return the length of the map.
// If the format doesn't prefix the length, it returns decContainerLenUnknown.
// If the expected array was a nil in the stream, it returns decContainerLenNil.
// If the format doesn't prefix the length, it returns containerLenUnknown.
// If the expected array was a nil in the stream, it returns containerLenNil.
ReadMapStart() int
ReadMapEnd()
reset()
atEndOfDecode()
uncacheRead()
nextValueBytes(start []byte) []byte
decoder() *Decoder
}
@@ -269,6 +260,25 @@ type DecodeOptions struct {
// RawToString controls how raw bytes in a stream are decoded into a nil interface{}.
// By default, they are decoded as []byte, but can be decoded as string (if configured).
RawToString bool
// ZeroCopy controls whether decoded values point into the
// input bytes passed into a NewDecoderBytes/ResetBytes(...) call.
//
// To illustrate, if ZeroCopy and decoding from a []byte (not io.Reader),
// then a []byte in the output result may just be a slice of (point into)
// the input bytes.
//
// This optimization prevents unnecessary copying.
//
// However, it is made optional, as the caller MUST ensure that the input parameter
// is not modified after the Decode() happens.
ZeroCopy bool
// PreferPointerForStructOrArray controls whether a struct or array
// is stored in a nil interface{}, or a pointer to it.
//
// This mostly impacts when we decode registered extensions.
PreferPointerForStructOrArray bool
}
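A brief usage sketch for the ZeroCopy option documented above (the handle choice, function name, and payload type are illustrative): because DecodeOptions is embedded in every Handle, the flag is set directly on the handle, and the caller must then treat the input buffer as read-only for as long as the decoded values are alive.

```go
package example

import "github.com/ugorji/go/codec"

// decodeAliased decodes from in with ZeroCopy enabled: []byte values inside
// out may point directly into in rather than being copied, so the caller must
// not modify in while out is still in use.
func decodeAliased(in []byte) (out map[string][]byte, err error) {
	var h codec.CborHandle
	h.ZeroCopy = true // promoted field from the embedded DecodeOptions
	err = codec.NewDecoderBytes(in, &h).Decode(&out)
	return
}
```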
// ----------------------------------------
@@ -288,27 +298,25 @@ func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
bm := rv2i(rv).(encoding.BinaryUnmarshaler)
xbs := d.d.DecodeBytes(nil, true)
if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
panic(fnerr)
}
fnerr := bm.UnmarshalBinary(xbs)
halt.onerror(fnerr)
}
func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
tm := rv2i(rv).(encoding.TextUnmarshaler)
fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
if fnerr != nil {
panic(fnerr)
}
halt.onerror(fnerr)
}
func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
tm := rv2i(rv).(jsonUnmarshaler)
// bs := d.d.DecodeBytes(d.b[:], true, true)
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
fnerr := tm.UnmarshalJSON(d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
bs := d.blist.get(256)[:0]
bs = d.d.nextValueBytes(bs)
fnerr := tm.UnmarshalJSON(bs)
d.blist.put(bs)
halt.onerror(fnerr)
}
func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
@@ -399,7 +407,6 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
// Consequently, we should relax this. Put it behind a const flag for now.
if decFailNonEmptyIntf && f.ti.numMeth > 0 {
d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
return
}
switch n.v {
case valueTypeMap:
@@ -483,6 +490,12 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
rvn = rvn.Elem()
}
}
// if struct/array, directly store pointer into the interface
if d.h.PreferPointerForStructOrArray && rvn.CanAddr() {
if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct {
rvn = rvn.Addr()
}
}
case valueTypeNil:
// rvn = reflect.Zero(f.ti.rt)
// no-op
@@ -501,7 +514,7 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
case valueTypeTime:
rvn = n.rt()
default:
panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v)
halt.errorf("kInterfaceNaked: unexpected valueType: %d", n.v)
}
return
}
@@ -522,7 +535,6 @@ func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
rv.Set(rvn)
} else {
rvn = d.kInterfaceNaked(f)
// xdebugf("kInterface: %v", rvn)
if rvn.IsValid() {
rv.Set(rvn)
} else if d.h.InterfaceReset {
@@ -575,10 +587,6 @@ func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayL
func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
sfn := structFieldNode{v: rv, update: true}
ctyp := d.d.ContainerType()
if ctyp == valueTypeNil {
rvSetDirect(rv, f.ti.rv0)
return
}
var mf MissingFielder
if f.ti.isFlag(tiflagMissingFielder) {
mf = rv2i(rv).(MissingFielder)
@@ -611,8 +619,7 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
var f interface{}
d.decode(&f)
if !mf.CodecMissingField(rvkencname, f) && d.h.ErrorIfNoField {
d.errorf("no matching struct field found when decoding stream map with key: %s ",
stringView(rvkencname))
d.errorf("no matching struct field when decoding stream map with key: %s ", stringView(rvkencname))
}
} else {
d.structFieldNotFound(-1, stringView(rvkencname))
@@ -653,8 +660,7 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
}
d.arrayEnd()
} else {
d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct)
return
d.onerror(errOnlyMapOrArrayCanDecodeIntoStruct)
}
}
@@ -664,14 +670,10 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
// Note: rv is a slice type here - guaranteed
rvCanset := rv.CanSet()
rtelem0 := f.ti.elem
ctyp := d.d.ContainerType()
if ctyp == valueTypeNil {
if rv.CanSet() {
rvSetDirect(rv, f.ti.rv0)
}
return
}
if ctyp == valueTypeBytes || ctyp == valueTypeString {
// you can only decode bytes or string in the stream into a slice or array of bytes
if !(f.ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
@@ -681,7 +683,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
bs2 := d.d.DecodeBytes(rvbs, false)
// if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) {
if rv.CanSet() {
if rvCanset {
rvSetBytes(rv, bs2)
} else if len(rvbs) > 0 && len(bs2) > 0 {
copy(rvbs, bs2)
@@ -694,7 +696,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
// an array can never return a nil slice. so no need to check f.array here.
if containerLenS == 0 {
if rv.CanSet() {
if rvCanset {
if rvIsNil(rv) {
rvSetDirect(rv, reflect.MakeSlice(f.ti.rt, 0, 0))
} else {
@@ -717,9 +719,9 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
var fn *codecFn
var rv0 = rv
var rvChanged bool
var rvCanset = rv.CanSet()
var rv0 = rv
var rv9 reflect.Value
rvlen := rvGetSliceLen(rv)
@@ -728,24 +730,28 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
if hasLen {
if containerLenS > rvcap {
oldRvlenGtZero := rvlen > 0
rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size()))
if rvlen <= rvcap {
rvlen1 := decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size()))
if rvlen1 == rvlen {
} else if rvlen1 <= rvcap {
if rvCanset {
rvlen = rvlen1
rvSetSliceLen(rv, rvlen)
}
} else if rvCanset {
} else if rvCanset { // rvlen1 > rvcap
rvlen = rvlen1
rv = reflect.MakeSlice(f.ti.rt, rvlen, rvlen)
rvCanset = rv.CanSet()
rvcap = rvlen
rvChanged = true
} else {
} else { // rvlen1 > rvcap && !canSet
d.errorf("cannot decode into non-settable slice")
}
if rvChanged && oldRvlenGtZero && rtelem0Mut { // !isImmutableKind(rtelem0.Kind()) {
rvCopySlice(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
}
} else if containerLenS != rvlen {
rvlen = containerLenS
if rvCanset {
rvlen = containerLenS
rvSetSliceLen(rv, rvlen)
}
}
@@ -757,63 +763,52 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
var j int
for ; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
if j == 0 && f.seq == seqTypeSlice && rvIsNil(rv) {
if hasLen {
rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size)
} else {
rvlen = decDefSliceCap
}
if j == 0 && f.seq == seqTypeSlice && rvIsNil(rv) { // means hasLen = false
if rvCanset {
rv = reflect.MakeSlice(f.ti.rt, rvlen, rvlen)
rvcap = rvlen
rvlen = decDefSliceCap
rvcap = rvlen * 2
rv = reflect.MakeSlice(f.ti.rt, rvlen, rvcap)
rvCanset = rv.CanSet()
rvChanged = true
} else {
d.errorf("cannot decode into non-settable slice")
}
}
slh.ElemContainerState(j)
// if indefinite, etc, then expand the slice if necessary
if j >= rvlen {
if f.seq == seqTypeArray {
d.arrayCannotExpand(rvlen, j+1)
// drain completely and return
d.swallow()
j++
for ; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
slh.ElemContainerState(j)
d.swallow()
}
slh.End()
decArrayCannotExpand(slh, hasLen, rvlen, j, containerLenS)
return
}
slh.ElemContainerState(j)
// rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs
// expand the slice up to the cap.
// Note that we did, so we have to reset it later.
if rvlen < rvcap {
if rv.CanSet() {
rvSetSliceLen(rv, rvcap)
} else if rvCanset {
rv = rvSlice(rv, rvcap)
rvChanged = true
rvlen = rvcap
if rvCanset {
rvSetSliceLen(rv, rvlen)
} else if rvChanged {
rv = rvSlice(rv, rvlen)
} else {
d.errorf(errmsgExpandSliceCannotChange)
return
}
rvlen = rvcap
} else {
if !rvCanset {
if !(rvCanset || rvChanged) {
d.errorf(errmsgExpandSliceCannotChange)
return
}
rvcap = growCap(rvcap, rtelem0Size, rvcap)
rv9 = reflect.MakeSlice(f.ti.rt, rvcap, rvcap)
rvcap = growCap(rvcap, rtelem0Size, 1)
rvlen = rvcap
rv9 = reflect.MakeSlice(f.ti.rt, rvlen, rvcap)
rvCopySlice(rv9, rv)
rv = rv9
rvCanset = rv.CanSet()
rvChanged = true
rvlen = rvcap
}
} else {
slh.ElemContainerState(j)
}
rv9 = rvSliceIndex(rv, j, f.ti)
if d.h.SliceElementReset {
@@ -830,16 +825,16 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
d.decodeValue(rv9, fn)
}
if j < rvlen {
if rv.CanSet() {
if rvCanset {
rvSetSliceLen(rv, j)
} else if rvCanset {
} else if rvChanged {
rv = rvSlice(rv, j)
rvChanged = true
}
rvlen = j
} else if j == 0 && rvIsNil(rv) {
if rvCanset {
rv = reflect.MakeSlice(f.ti.rt, 0, 0)
// rvCanset = rv.CanSet()
rvChanged = true
}
}
@@ -848,7 +843,6 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
if rvChanged { // infers rvCanset=true, so it can be reset
rv0.Set(rv)
}
}
func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
@@ -860,10 +854,6 @@ func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
}
rtelem0 := f.ti.elem
ctyp := d.d.ContainerType()
if ctyp == valueTypeNil {
rvSetDirect(rv, f.ti.rv0)
return
}
if ctyp == valueTypeBytes || ctyp == valueTypeString {
// you can only decode bytes or string in the stream into a slice or array of bytes
if !(f.ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
@@ -949,10 +939,6 @@ func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
containerLen := d.mapStart()
if containerLen == decContainerLenNil {
rvSetDirect(rv, f.ti.rv0)
return
}
ti := f.ti
if rvIsNil(rv) {
rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size()))
@@ -1082,51 +1068,18 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
if doMapSet && ktypeIsString { // set to a real string (not string view)
rvk.SetString(d.string(kstrbs))
}
d.decodeValue(rvv, valFn)
// since a map, we have to set zero value if needed
if d.d.TryNil() {
rvv = reflect.Zero(rvv.Type())
} else {
d.decodeValueNoCheckNil(rvv, valFn)
}
if doMapSet {
mapSet(rv, rvk, rvv)
}
}
d.mapEnd()
}
// decNaked is used to keep track of the primitives decoded.
// Without it, we would have to decode each primitive and wrap it
// in an interface{}, causing an allocation.
// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
// so we can rest assured that no other decoding happens while these
// primitives are being decoded.
//
// maps and arrays are not handled by this mechanism.
// However, RawExt is, and we accommodate extensions that decode
// RawExt from DecodeNaked but need to decode the value subsequently.
// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
//
// However, decNaked also keeps some arrays of default maps and slices
// used in DecodeNaked. This way, we can get a pointer to it
// without causing a new heap allocation.
//
// kInterfaceNaked will ensure that there is no allocation for the common
// uses.
type decNaked struct {
// r RawExt // used for RawExt, uint, []byte.
// primitives below
u uint64
i int64
f float64
l []byte
s string
// ---- cpu cache line boundary?
t time.Time
b bool
// state
v valueType
}
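To illustrate the allocation argument in the comment above: decoding primitives into one reusable tagged-union struct avoids boxing each value into a fresh interface{}. The sketch below is a hypothetical, simplified analogue with invented names, not the library's own type.

package main

import "fmt"

// unionValue is a rough stand-in for decNaked/fauxUnion: one reusable
// struct holds whichever primitive was decoded last, plus a kind tag.
type unionValue struct {
	u    uint64
	i    int64
	f    float64
	s    string
	b    bool
	kind byte // 'u', 'i', 'f', 's', 'b'
}

// decodeInt pretends to decode an integer from a stream. Writing into a
// caller-owned struct means no per-primitive interface{} allocation.
func decodeInt(n *unionValue, v int64) {
	n.kind = 'i'
	n.i = v
}

func main() {
	var n unionValue // reused across many decode calls
	decodeInt(&n, 42)
	if n.kind == 'i' {
		fmt.Println("decoded int:", n.i)
	}
}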
// Decoder reads and decodes an object from an input stream in a supported format.
@@ -1139,8 +1092,6 @@ type decNaked struct {
// This is the idiomatic way to use.
type Decoder struct {
panicHdl
// hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
// Try to put things that go together to fit within a cache line (8 words).
d decDriver
@@ -1156,7 +1107,7 @@ type Decoder struct {
decRd
// ---- cpu cache line boundary?
n decNaked
n fauxUnion
hh Handle
err error
@@ -1269,13 +1220,16 @@ func (d *Decoder) ResetBytes(in []byte) {
if in == nil {
return
}
d.bytes = true
d.bufio = false
d.bytes = true
// if d.rb == nil {
// d.rb = new(bytesDecReader)
// }
d.rb.reset(in)
d.resetCommon()
}
func (d *Decoder) naked() *decNaked {
func (d *Decoder) naked() *fauxUnion {
return &d.n
}
@@ -1349,16 +1303,14 @@ func (d *Decoder) Decode(v interface{}) (err error) {
if d.err != nil {
return d.err
}
if recoverPanicToErr {
defer func() {
if x := recover(); x != nil {
panicValToErr(d, x, &d.err)
if d.err != err {
err = d.err
}
defer func() {
if x := recover(); x != nil {
panicValToErr(d, x, &d.err)
if d.err != err {
err = d.err
}
}()
}
}
}()
// defer d.deferred(&err)
d.mustDecode(v)
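Decode (and Encode below) converts panics raised deeper in the codec into a returned error. A minimal, standalone sketch of that recover-to-error pattern, using generic names rather than the package's internal helpers:

package main

import (
	"errors"
	"fmt"
)

// decodeInto stands in for the panicking internals (mustDecode and friends).
func decodeInto(v *int, ok bool) {
	if !ok {
		panic(errors.New("decode failed"))
	}
	*v = 7
}

// Decode recovers any panic from the implementation and returns it as an error.
func Decode(v *int, ok bool) (err error) {
	defer func() {
		if x := recover(); x != nil {
			if e, isErr := x.(error); isErr {
				err = e
			} else {
				err = fmt.Errorf("%v", x)
			}
		}
	}()
	decodeInto(v, ok)
	return
}

func main() {
	var v int
	fmt.Println(Decode(&v, true), v) // <nil> 7
	fmt.Println(Decode(&v, false))   // decode failed
}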
@@ -1368,9 +1320,7 @@ func (d *Decoder) Decode(v interface{}) (err error) {
// MustDecode is like Decode, but panics if unable to Decode.
// This provides insight to the code location that triggered the error.
func (d *Decoder) MustDecode(v interface{}) {
if d.err != nil {
panic(d.err)
}
halt.onerror(d.err)
d.mustDecode(v)
}
@@ -1400,40 +1350,9 @@ func (d *Decoder) Release() {
}
func (d *Decoder) swallow() {
switch d.d.ContainerType() {
case valueTypeNil:
case valueTypeMap:
containerLen := d.mapStart()
hasLen := containerLen >= 0
for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
d.mapElemKey()
d.swallow()
d.mapElemValue()
d.swallow()
}
d.mapEnd()
case valueTypeArray:
containerLen := d.arrayStart()
hasLen := containerLen >= 0
for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
d.arrayElem()
d.swallow()
}
d.arrayEnd()
case valueTypeBytes:
d.d.DecodeBytes(d.b[:], true)
case valueTypeString:
d.d.DecodeStringAsBytes()
default:
// these are all primitives, which we can get from decodeNaked
// if RawExt using Value, complete the processing.
n := d.naked()
d.d.DecodeNaked()
if n.v == valueTypeExt && n.l == nil {
var v2 interface{}
d.decode(&v2)
}
}
bs := d.blist.get(256)[:0]
bs = d.d.nextValueBytes(bs) // discard it
d.blist.put(bs)
}
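The rewritten swallow discards the next value by reading its raw bytes into a pooled scratch buffer (blist.get / blist.put). The sketch below shows the same borrow-use-return pattern with the standard library's sync.Pool; it is only an analogue, since the codec uses its own pool type.

package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 256) },
}

// discardNextValue models swallow: borrow a scratch buffer, read the next
// value's bytes into it purely to advance the stream, then return the buffer.
func discardNextValue(read func([]byte) []byte) {
	bs := bufPool.Get().([]byte)
	bs = read(bs[:0]) // the bytes are thrown away
	bufPool.Put(bs)   // hand the (possibly grown) buffer back to the pool
}

func main() {
	discardNextValue(func(dst []byte) []byte {
		return append(dst, `{"skipped":true}`...)
	})
	fmt.Println("next value discarded")
}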
func setZero(iv interface{}) {
@@ -1473,7 +1392,7 @@ func setZero(iv interface{}) {
*v = 0
case *float64:
*v = 0
case *[]uint8:
case *[]byte:
*v = nil
case *Raw:
*v = nil
@@ -1512,15 +1431,16 @@ func (d *Decoder) decode(iv interface{}) {
// consequently, we deal with nil and interfaces outside the switch.
if iv == nil {
d.errorstr(errstrCannotDecodeIntoNil)
return
d.onerror(errCannotDecodeIntoNil)
}
switch v := iv.(type) {
// case nil:
// case Selfer:
case reflect.Value:
d.ensureDecodeable(v)
if !isDecodeable(v) {
d.haltAsNotDecodeable(v)
}
d.decodeValue(v, nil)
case *string:
@@ -1551,9 +1471,9 @@ func (d *Decoder) decode(iv interface{}) {
*v = float32(d.decodeFloat32())
case *float64:
*v = d.d.DecodeFloat64()
case *[]uint8:
case *[]byte:
*v = d.d.DecodeBytes(*v, false)
case []uint8:
case []byte:
b := d.d.DecodeBytes(v, false)
if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) {
copy(v, b)
@@ -1567,11 +1487,22 @@ func (d *Decoder) decode(iv interface{}) {
d.decodeValue(rv4i(iv), nil)
default:
if v, ok := iv.(Selfer); ok {
v.CodecDecodeSelf(d)
} else if !fastpathDecodeTypeSwitch(iv, d) {
// if xfFn := d.h.getExt(i2rtid(iv), true); xfFn != nil {
// d.d.DecodeExt(iv, xfFn.tag, xfFn.ext)
// } else if v, ok := iv.(Selfer); ok {
// v.CodecDecodeSelf(d)
// } else if !fastpathDecodeTypeSwitch(iv, d) {
// v := rv4i(iv)
// if !isDecodeable(v) {
// d.haltAsNotDecodeable(v)
// }
// d.decodeValue(v, nil)
// }
if !fastpathDecodeTypeSwitch(iv, d) {
v := rv4i(iv)
d.ensureDecodeable(v)
if !isDecodeable(v) {
d.haltAsNotDecodeable(v)
}
d.decodeValue(v, nil)
}
}
@@ -1582,26 +1513,37 @@ func (d *Decoder) decode(iv interface{}) {
//
// This way, we know if it is itself a pointer, and can handle nil in
// the stream effectively.
//
// Note that decodeValue will handle nil in the stream early, so that the
// subsequent calls i.e. kXXX methods, etc do not have to handle it themselves.
func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn) {
// if rv.Kind() == reflect.Ptr && d.d.TryNil() {
if d.d.TryNil() {
if rv.Kind() == reflect.Ptr {
rv = rv.Elem()
}
if rv.CanSet() {
rv.Set(reflect.Zero(rv.Type()))
}
return
}
d.decodeValueNoCheckNil(rv, fn)
}
func (d *Decoder) decodeValueNoCheckNil(rv reflect.Value, fn *codecFn) {
// If stream is not containing a nil value, then we can deref to the base
// non-pointer value, and decode into that.
var rvp reflect.Value
var rvpValid bool
PTR:
if rv.Kind() == reflect.Ptr {
if d.d.TryNil() {
if rvelem := rv.Elem(); rvelem.CanSet() {
rvelem.Set(reflect.Zero(rvelem.Type()))
}
return
}
rvpValid = true
for rv.Kind() == reflect.Ptr {
if rvIsNil(rv) {
rvSetDirect(rv, reflect.New(rv.Type().Elem()))
}
rvp = rv
rv = rv.Elem()
if rvIsNil(rv) {
rvSetDirect(rv, reflect.New(rv.Type().Elem()))
}
rvp = rv
rv = rv.Elem()
goto PTR
}
if fn == nil {
@@ -1627,10 +1569,8 @@ func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
if d.h.ErrorIfNoField {
if index >= 0 {
d.errorf("no matching struct field found when decoding stream array at index %v", index)
return
} else if rvkencname != "" {
d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
return
}
}
d.swallow()
@@ -1642,49 +1582,46 @@ func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
}
}
// isDecodeable checks if value can be decoded into
//
// decode can take any reflect.Value that is inherently addressable, i.e.
// - array
// - non-nil chan (we will SEND to it)
// - non-nil slice (we will set its elements)
// - non-nil map (we will put into it)
// - non-nil pointer (we can "update" it)
func isDecodeable(rv reflect.Value) (canDecode bool) {
switch rv.Kind() {
case reflect.Array:
return rv.CanAddr()
case reflect.Ptr:
canDecode = rv.CanAddr()
case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map:
if !rvIsNil(rv) {
return true
}
case reflect.Slice, reflect.Chan, reflect.Map:
if !rvIsNil(rv) {
return true
canDecode = true
}
}
return
}
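The rules listed in the comment above can be checked directly with the reflect package. A standalone illustration, independent of the codec internals, of which values qualify as decode targets:

package main

import (
	"fmt"
	"reflect"
)

func decodeable(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Array:
		return rv.CanAddr()
	case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map:
		return !rv.IsNil()
	}
	return false
}

func main() {
	var arr [4]int
	var m map[string]int
	p := new(int)

	fmt.Println(decodeable(reflect.ValueOf(arr)))         // false: a copy, not addressable
	fmt.Println(decodeable(reflect.ValueOf(&arr).Elem())) // true: addressable array
	fmt.Println(decodeable(reflect.ValueOf(m)))           // false: nil map
	fmt.Println(decodeable(reflect.ValueOf(p)))           // true: non-nil pointer
}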
func (d *Decoder) ensureDecodeable(rv reflect.Value) {
// decode can take any reflect.Value that is inherently addressable, i.e.
// - array
// - non-nil chan (we will SEND to it)
// - non-nil slice (we will set its elements)
// - non-nil map (we will put into it)
// - non-nil pointer (we can "update" it)
if isDecodeable(rv) {
return
}
// func (d *Decoder) ensureDecodeable(rv reflect.Value) {
// if !isDecodeable(rv) {
// d.haltAsNotDecodeable(rv)
// }
// }
func (d *Decoder) haltAsNotDecodeable(rv reflect.Value) {
if !rv.IsValid() {
d.errorstr(errstrCannotDecodeIntoNil)
return
d.onerror(errCannotDecodeIntoNil)
}
if !rv.CanInterface() {
d.errorf("cannot decode into a value without an interface: %v", rv)
return
}
rvi := rv2i(rv)
rvk := rv.Kind()
d.errorf("cannot decode into value of kind: %v, type: %T, %#v", rvk, rvi, rvi)
d.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv))
}
func (d *Decoder) depthIncr() {
d.depth++
if d.depth >= d.maxdepth {
panic(errMaxDepthExceeded)
halt.onerror(errMaxDepthExceeded)
}
}
@@ -1711,25 +1648,13 @@ func (d *Decoder) string(v []byte) (s string) {
return
}
// nextValueBytes returns the next value in the stream as a set of bytes.
func (d *Decoder) nextValueBytes() (bs []byte) {
d.d.uncacheRead()
d.r().track()
d.swallow()
bs = d.r().stopTrack()
return
}
func (d *Decoder) rawBytes() []byte {
func (d *Decoder) rawBytes() (v []byte) {
// ensure that this is not a view into the bytes
// i.e. make new copy always.
bs := d.nextValueBytes()
bs2 := make([]byte, len(bs))
copy(bs2, bs)
return bs2
// i.e. if necessary, make new copy always.
return d.d.nextValueBytes(nil)
}
func (d *Decoder) wrapErr(v interface{}, err *error) {
func (d *Decoder) wrapErr(v error, err *error) {
*err = decodeError{codecError: codecError{name: d.hh.Name(), err: v}, pos: d.NumBytesRead()}
}
@@ -1753,12 +1678,11 @@ func (d *Decoder) decodeFloat32() float32 {
// Note: We update the .c after calling the callback.
// This way, the callback can know what the last status was.
// Note: if you call mapStart and it returns decContainerLenNil,
// then do NOT call mapEnd.
// MARKER: do not call mapEnd if mapStart returns containerLenNil.
func (d *Decoder) mapStart() (v int) {
v = d.d.ReadMapStart()
if v != decContainerLenNil {
if v != containerLenNil {
d.depthIncr()
d.c = containerMapStart
}
@@ -1788,7 +1712,7 @@ func (d *Decoder) mapEnd() {
func (d *Decoder) arrayStart() (v int) {
v = d.d.ReadArrayStart()
if v != decContainerLenNil {
if v != containerLenNil {
d.depthIncr()
d.c = containerArrayStart
}
@@ -1935,25 +1859,25 @@ func decByteSlice(r *decRd, clen, maxInitLen int, bs []byte) (bsOut []byte) {
// It is used to ensure that the []byte returned is not
// part of the input stream or input stream buffers.
func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
if len(in) > 0 {
// if isBytesReader || len(in) <= scratchByteArrayLen {
// if cap(dest) >= len(in) {
// out = dest[:len(in)]
// } else {
// out = make([]byte, len(in))
// }
// copy(out, in)
// return
// }
if cap(dest) >= len(in) {
out = dest[:len(in)]
} else {
out = make([]byte, len(in))
}
copy(out, in)
return
if len(in) == 0 {
return in
}
return in
// if isBytesReader || len(in) <= scratchByteArrayLen {
// if cap(dest) >= len(in) {
// out = dest[:len(in)]
// } else {
// out = make([]byte, len(in))
// }
// copy(out, in)
// return
// }
if cap(dest) >= len(in) {
out = dest[:len(in)]
} else {
out = make([]byte, len(in))
}
copy(out, in)
return
}
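The copying variant being replaced above boils down to a copy-or-allocate decision: reuse the destination buffer when it is large enough, otherwise allocate, so the result never aliases the input stream. A standalone sketch of that idea:

package main

import "fmt"

// detach copies in into dest when dest has enough capacity, otherwise into a
// fresh allocation, so the returned slice does not alias the input stream.
func detach(dest, in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	var out []byte
	if cap(dest) >= len(in) {
		out = dest[:len(in)]
	} else {
		out = make([]byte, len(in))
	}
	copy(out, in)
	return out
}

func main() {
	stream := []byte("shared-buffer-bytes")
	scratch := make([]byte, 0, 64)
	got := detach(scratch, stream)
	stream[0] = 'X' // mutating the stream does not affect the detached copy
	fmt.Println(string(got))
}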
// decInferLen will infer a sensible length, given the following:
@@ -1971,13 +1895,10 @@ func decInferLen(clen, maxlen, unit int) (rvlen int) {
// maxlen<=0, clen>0: infer maxlen, and cap on it
// maxlen> 0, clen>0: cap at maxlen
if clen == 0 {
if clen == 0 || clen == containerLenNil {
return
}
if clen < 0 {
if clen == decContainerLenNil {
return 0
}
return maxLenIfUnset
}
if unit == 0 {
@@ -2006,24 +1927,7 @@ func decInferLen(clen, maxlen, unit int) (rvlen int) {
return
}
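A simplified, hypothetical version of the length-inference policy described above. The sentinel value and the byte budget below are assumptions made for this sketch; the real constants and the exact capping formula live elsewhere in the package.

package main

import "fmt"

const (
	containerLenNil = -2147483648 // assumed sentinel; the real value is internal to the package
	defaultMaxBytes = 1024        // assumed per-container pre-allocation budget
)

// inferLen caps a stream-declared container length so a hostile or corrupt
// length cannot force an enormous up-front allocation.
func inferLen(clen, maxlen, unit int) int {
	if clen == 0 || clen == containerLenNil {
		return 0
	}
	if clen < 0 { // indefinite-length container
		return 8 // assumed small default capacity
	}
	if unit == 0 {
		return clen
	}
	if maxlen <= 0 {
		maxlen = defaultMaxBytes / unit // infer a cap from the byte budget
	}
	if clen > maxlen {
		return maxlen
	}
	return clen
}

func main() {
	fmt.Println(inferLen(10, 0, 8))      // small length: taken as-is
	fmt.Println(inferLen(1000000, 0, 8)) // huge length: capped by the inferred budget
	fmt.Println(inferLen(-1, 0, 8))      // unknown length: small default
}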
func decReadFull(r io.Reader, bs []byte) (n uint, err error) {
var nn int
for n < uint(len(bs)) && err == nil {
nn, err = r.Read(bs[n:])
if nn > 0 {
if err == io.EOF {
// leave EOF for next time
err = nil
}
n += uint(nn)
}
}
// do not do this - it serves no purpose
// if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF }
return
}
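The deleted decReadFull deliberately reports a plain io.EOF on a short read and leaves the caller to compare n against the buffer length, whereas io.ReadFull converts the same situation into io.ErrUnexpectedEOF. A small comparison, with the helper condensed from the removed code:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readFull is a condensed sketch of the deleted decReadFull behavior.
func readFull(r io.Reader, bs []byte) (n int, err error) {
	for n < len(bs) && err == nil {
		var nn int
		nn, err = r.Read(bs[n:])
		if nn > 0 {
			if err == io.EOF {
				err = nil // leave EOF for the next Read call
			}
			n += nn
		}
	}
	return
}

func main() {
	buf := make([]byte, 8)

	n, err := io.ReadFull(bytes.NewReader([]byte("abc")), buf)
	fmt.Println(n, err) // 3 unexpected EOF

	n2, err2 := readFull(bytes.NewReader([]byte("abc")), buf)
	fmt.Println(n2, err2) // 3 EOF
}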
func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString bool) {
func fauxUnionReadRawBytes(dr decDriver, d *Decoder, n *fauxUnion, rawToString bool) {
if rawToString {
n.v = valueTypeString
n.s = string(dr.DecodeBytes(d.b[:], true))
@@ -2032,3 +1936,16 @@ func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString boo
n.l = dr.DecodeBytes(nil, false)
}
}
func decArrayCannotExpand(slh decSliceHelper, hasLen bool, lenv, j, containerLenS int) {
slh.d.arrayCannotExpand(lenv, j+1)
// drain completely and return
slh.ElemContainerState(j)
slh.d.swallow()
j++
for ; (hasLen && j < containerLenS) || !(hasLen || slh.d.checkBreak()); j++ {
slh.ElemContainerState(j)
slh.d.swallow()
}
slh.End()
}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
/*

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -135,6 +135,16 @@ type EncodeOptions struct {
// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
StringToRaw bool
// OptimumSize controls whether we optimize for the smallest size.
//
// Some formats will use this flag to determine whether to encode
// in the smallest size possible, even if it takes slightly longer.
//
// For example, some formats that support half-floats might check if it is possible
// to store a float64 as a half float. Doing this check has a small performance cost,
// but the benefit is that the encoded message will be smaller.
OptimumSize bool
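A usage sketch for the new option, assuming the usual github.com/ugorji/go/codec import path for this vendored package and a CBOR handle (one of the formats with a smaller float representation). Whether the output actually shrinks is up to the handle.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path for this vendored package
)

func main() {
	var h codec.CborHandle
	h.OptimumSize = true // ask the format to pick the smallest encoding it can

	var out []byte
	enc := codec.NewEncoderBytes(&out, &h)
	if err := enc.Encode(float64(1.5)); err != nil { // 1.5 is exactly representable in a half-float
		panic(err)
	}
	fmt.Printf("% x\n", out)
}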
// // AsSymbols defines what should be encoded as symbols.
// //
// // Encoding as symbols can reduce the encoded size significantly.
@@ -248,10 +258,6 @@ func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(uint64(rvGetUintptr(rv)))
}
func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeNil()
}
func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) {
e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
}
@@ -304,10 +310,7 @@ func (e *Encoder) kSliceWMbs(rv reflect.Value, ti *typeInfo) {
if l == 0 {
e.mapStart(0)
} else {
if l%2 == 1 {
e.errorf("mapBySlice requires even slice length, but got %v", l)
return
}
e.haltOnMbsOddLen(l)
e.mapStart(l / 2)
fn := e.kSeqFn(ti.elem)
for j := 0; j < l; j++ {
@@ -342,7 +345,6 @@ func (e *Encoder) kSeqWMbs(rv reflect.Value, ti *typeInfo) {
} else {
if l%2 == 1 {
e.errorf("mapBySlice requires even slice length, but got %v", l)
return
}
e.mapStart(l / 2)
fn := e.kSeqFn(ti.elem)
@@ -372,13 +374,8 @@ func (e *Encoder) kSeqW(rv reflect.Value, ti *typeInfo) {
}
func (e *Encoder) kChan(f *codecFnInfo, rv reflect.Value) {
if rvIsNil(rv) {
e.e.EncodeNil()
return
}
if f.ti.chandir&uint8(reflect.RecvDir) == 0 {
e.errorf("send-only channel cannot be encoded")
return
}
if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) {
e.kSliceBytesChan(rv)
@@ -395,10 +392,6 @@ func (e *Encoder) kChan(f *codecFnInfo, rv reflect.Value) {
}
func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
if rvIsNil(rv) {
e.e.EncodeNil()
return
}
if f.ti.mbs {
e.kSliceWMbs(rv, f.ti)
} else {
@@ -592,11 +585,6 @@ func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
}
func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
if rvIsNil(rv) {
e.e.EncodeNil()
return
}
l := rv.Len()
e.mapStart(l)
if l == 0 {
@@ -915,6 +903,9 @@ func (e *Encoder) ResetBytes(out *[]byte) {
in = make([]byte, defEncByteBufSize)
}
e.bytes = true
// if e.wb == nil {
// e.wb = new(bytesEncAppender)
// }
e.wb.reset(in, out)
e.resetCommon()
}
@@ -1012,24 +1003,22 @@ func (e *Encoder) Encode(v interface{}) (err error) {
if e.err != nil {
return e.err
}
if recoverPanicToErr {
defer func() {
// if error occurred during encoding, return that error;
// else if error occurred on end'ing (i.e. during flush), return that error.
err = e.w().endErr()
x := recover()
if x == nil {
if e.err != err {
e.err = err
}
} else {
panicValToErr(e, x, &e.err)
if e.err != err {
err = e.err
}
defer func() {
// if error occurred during encoding, return that error;
// else if error occurred on end'ing (i.e. during flush), return that error.
err = e.w().endErr()
x := recover()
if x == nil {
if e.err != err {
e.err = err
}
}()
}
} else {
panicValToErr(e, x, &e.err)
if e.err != err {
err = e.err
}
}
}()
// defer e.deferred(&err)
e.mustEncode(v)
@@ -1039,9 +1028,7 @@ func (e *Encoder) Encode(v interface{}) (err error) {
// MustEncode is like Encode, but panics if unable to Encode.
// This provides insight to the code location that triggered the error.
func (e *Encoder) MustEncode(v interface{}) {
if e.err != nil {
panic(e.err)
}
halt.onerror(e.err)
e.mustEncode(v)
}
@@ -1080,8 +1067,6 @@ func (e *Encoder) encode(iv interface{}) {
return
}
var vself Selfer
switch v := iv.(type) {
// case nil:
// case Selfer:
@@ -1122,7 +1107,7 @@ func (e *Encoder) encode(iv interface{}) {
e.e.EncodeFloat64(v)
case time.Time:
e.e.EncodeTime(v)
case []uint8:
case []byte:
e.e.EncodeStringBytesRaw(v)
case *Raw:
e.rawBytes(*v)
@@ -1158,16 +1143,25 @@ func (e *Encoder) encode(iv interface{}) {
e.e.EncodeFloat64(*v)
case *time.Time:
e.e.EncodeTime(*v)
case *[]uint8:
case *[]byte:
if *v == nil {
e.e.EncodeNil()
} else {
e.e.EncodeStringBytesRaw(*v)
}
default:
if vself, ok = iv.(Selfer); ok {
vself.CodecEncodeSelf(e)
} else if !fastpathEncodeTypeSwitch(iv, e) {
// var vself Selfer
// if xfFn := e.h.getExt(i2rtid(iv), true); xfFn != nil {
// e.e.EncodeExt(iv, xfFn.tag, xfFn.ext)
// } else if vself, ok = iv.(Selfer); ok {
// vself.CodecEncodeSelf(e)
// } else if !fastpathEncodeTypeSwitch(iv, e) {
// if !rv.IsValid() {
// rv = rv4i(iv)
// }
// e.encodeValue(rv, nil)
// }
if !fastpathEncodeTypeSwitch(iv, e) {
if !rv.IsValid() {
rv = rv4i(iv)
}
@@ -1176,6 +1170,10 @@ func (e *Encoder) encode(iv interface{}) {
}
}
// encodeValue will encode a value.
//
// Note that encodeValue will handle nil in the stream early, so that the
// subsequent calls i.e. kXXX methods, etc do not have to handle it themselves.
func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn) {
// if a valid fn is passed, it MUST BE for the dereferenced type of rv
@@ -1184,6 +1182,9 @@ func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn) {
// type T struct { tHelper }
// Here, for var v T; &v and &v.tHelper are the same pointer.
// Consequently, we need a tuple of type and pointer, which interface{} natively provides.
// MARKER: We check if value is nil here, so that the kXXX method do not have to.
var sptr interface{} // uintptr
var rvp reflect.Value
var rvpValid bool
@@ -1209,7 +1210,7 @@ TOP:
}
rv = rv.Elem()
goto TOP
case reflect.Slice, reflect.Map:
case reflect.Slice, reflect.Map, reflect.Chan:
if rvIsNil(rv) {
e.e.EncodeNil()
return
@@ -1250,9 +1251,7 @@ TOP:
}
func (e *Encoder) marshalUtf8(bs []byte, fnerr error) {
if fnerr != nil {
panic(fnerr)
}
halt.onerror(fnerr)
if bs == nil {
e.e.EncodeNil()
} else {
@@ -1262,9 +1261,7 @@ func (e *Encoder) marshalUtf8(bs []byte, fnerr error) {
}
func (e *Encoder) marshalAsis(bs []byte, fnerr error) {
if fnerr != nil {
panic(fnerr)
}
halt.onerror(fnerr)
if bs == nil {
e.e.EncodeNil()
} else {
@@ -1273,9 +1270,7 @@ func (e *Encoder) marshalAsis(bs []byte, fnerr error) {
}
func (e *Encoder) marshalRaw(bs []byte, fnerr error) {
if fnerr != nil {
panic(fnerr)
}
halt.onerror(fnerr)
if bs == nil {
e.e.EncodeNil()
} else {
@@ -1291,7 +1286,7 @@ func (e *Encoder) rawBytes(vv Raw) {
e.encWr.writeb(v) // e.asis(v)
}
func (e *Encoder) wrapErr(v interface{}, err *error) {
func (e *Encoder) wrapErr(v error, err *error) {
*err = encodeError{codecError{name: e.hh.Name(), err: v}}
}
@@ -1344,6 +1339,12 @@ func (e *Encoder) arrayEnd() {
// ----------
func (e *Encoder) haltOnMbsOddLen(length int) {
if length%2 == 1 {
e.errorf("mapBySlice requires even slice length, but got %v", length)
}
}
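The even-length requirement exists because a mapBySlice value is a flat list of alternating keys and values. A hedged usage sketch, assuming the public codec.MapBySlice marker interface and JsonHandle from this package:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path for this vendored package
)

// pairs encodes as a map because it implements codec.MapBySlice:
// even indexes are keys, odd indexes are values.
type pairs []string

func (pairs) MapBySlice() {}

func main() {
	var h codec.JsonHandle
	var out []byte

	ok := pairs{"a", "1", "b", "2"} // even length: two key/value pairs
	if err := codec.NewEncoderBytes(&out, &h).Encode(ok); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected: {"a":"1","b":"2"}

	bad := pairs{"a", "1", "b"} // odd length: the encoder reports an error
	err := codec.NewEncoderBytes(&out, &h).Encode(bad)
	fmt.Println(err != nil) // true
}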
func (e *Encoder) sideEncode(v interface{}, bs *[]byte) {
rv := baseRV(v)
e2 := NewEncoderBytes(bs, e.hh)
@@ -1354,7 +1355,6 @@ func (e *Encoder) sideEncode(v interface{}, bs *[]byte) {
func encStructFieldKey(encName string, ee encDriver, w *encWr,
keyType valueType, encNameAsciiAlphaNum bool, js bool) {
var m must
// use if-else-if, not switch (which compiles to a binary search);
// since keyType is typically valueTypeString, branch prediction is pretty good.
if keyType == valueTypeString {
@@ -1364,10 +1364,10 @@ func encStructFieldKey(encName string, ee encDriver, w *encWr,
ee.EncodeString(encName)
}
} else if keyType == valueTypeInt {
ee.EncodeInt(m.Int(strconv.ParseInt(encName, 10, 64)))
ee.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64)))
} else if keyType == valueTypeUint {
ee.EncodeUint(m.Uint(strconv.ParseUint(encName, 10, 64)))
ee.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64)))
} else if keyType == valueTypeFloat {
ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64)))
ee.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64)))
}
}
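The must.Int / must.Uint / must.Float calls above are parse-or-panic wrappers around strconv, with the panic later recovered into the encoder's returned error. A generic sketch of that idiom with a hypothetical helper name:

package main

import (
	"fmt"
	"strconv"
)

// mustInt returns the parsed value and panics on error; a surrounding
// recover (as in Encode) would turn that panic back into an error.
func mustInt(v int64, err error) int64 {
	if err != nil {
		panic(err)
	}
	return v
}

func main() {
	n := mustInt(strconv.ParseInt("1234", 10, 64))
	fmt.Println(n) // 1234
}

Feeding ParseInt's two return values straight into the helper keeps each generated call site on a single line.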

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
// +build !notfastpath
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fast-path.go.tmpl - DO NOT EDIT.
@@ -56,7 +56,9 @@ import (
const fastpathEnabled = true
{{/*
const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
*/ -}}
type fastpathT struct {}
@@ -112,7 +114,7 @@ func init() {
fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
i++
}
{{/* do not register []uint8 in fast-path */}}
{{/* do not register []byte in fast-path */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8" -}}
fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}{{end}}
@@ -178,20 +180,22 @@ func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder)
}
func (fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil() } else */ -}}
e.haltOnMbsOddLen(len(v))
{{/*
if len(v)%2 == 1 {
e.errorf(fastpathMapBySliceErrMsg, len(v))
} else {
e.mapStart(len(v) / 2)
for j := range v {
if j%2 == 0 {
e.mapElemKey()
} else {
e.mapElemValue()
}
{{ encmd .Elem "v[j]"}}
}
e.mapEnd()
}
*/ -}}
e.mapStart(len(v) / 2)
for j := range v {
if j%2 == 0 {
e.mapElemKey()
} else {
e.mapElemValue()
}
{{ encmd .Elem "v[j]"}}
}
e.mapEnd()
}
{{end}}{{end}}{{end -}}
@@ -269,7 +273,7 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
*/ -}}
case map[{{ .MapKey }}]{{ .Elem }}:
containerLen = d.mapStart()
if containerLen != decContainerLenNil {
if containerLen != containerLenNil {
if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d)
}
@@ -280,7 +284,7 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
containerLen = d.mapStart()
if containerLen == 0 {
d.mapEnd()
} else if containerLen == decContainerLenNil {
} else if containerLen == containerLenNil {
*v = nil
} else {
if *v == nil {
@@ -364,14 +368,10 @@ func (fastpathT) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *Decoder)
}
var j int
for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
if j == 0 && len(v) == 0 {
if hasLen {
xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
} else {
xlen = 8
}
if j == 0 && len(v) == 0 { // means hasLen == false
xlen = decDefSliceCap
v = make([]{{ .Elem }}, uint(xlen))
changed = true
changed = true
}
{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
if j >= len(v) {
@@ -404,7 +404,7 @@ func (fastpathT) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *Decoder)
for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
if j >= len(v) {
fastpathDecArrayCannotExpand(slh, hasLen, len(v), j, containerLenS)
decArrayCannotExpand(slh, hasLen, len(v), j, containerLenS)
return
}
slh.ElemContainerState(j)
@@ -418,48 +418,41 @@ func (fastpathT) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *Decoder)
}
{{end}}{{end}}{{end -}}
func fastpathDecArrayCannotExpand(slh decSliceHelper, hasLen bool, lenv, j, containerLenS int) {
slh.d.arrayCannotExpand(lenv, j+1)
slh.ElemContainerState(j)
slh.d.swallow()
j++
for ; (hasLen && j < containerLenS) || !(hasLen || slh.d.checkBreak()); j++ {
slh.ElemContainerState(j)
slh.d.swallow()
}
slh.End()
}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
{{/*
Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
Also, these methods are called by decodeValue directly, after handling a TryNil.
Consequently, there's no need to check for containerLenNil here.
*/ -}}
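The template note leans on Go's map reference semantics: a map reached through an interface{} is not settable as a reflect.Value, yet entries can still be inserted through it. A standalone demonstration:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{}
	var i interface{} = m

	rv := reflect.ValueOf(i)
	fmt.Println(rv.CanSet()) // false: the Value itself is not settable

	// The map header was copied by value, but it still refers to the same
	// underlying table, so inserts are visible through the original variable.
	rv.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(42))
	fmt.Println(m["k"]) // 42
}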
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
containerLen := d.mapStart()
if containerLen == decContainerLenNil {
{{/*
if containerLen == containerLenNil {
if rv.Kind() == reflect.Ptr {
*(rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})) = nil
}
} else {
if rv.Kind() == reflect.Ptr {
vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
if *vp == nil {
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
}
if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
}
} else if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
}
d.mapEnd()
return
}
*/ -}}
if rv.Kind() == reflect.Ptr {
vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
if *vp == nil {
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
}
if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
}
} else if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
}
d.mapEnd()
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
containerLen := d.mapStart()
if containerLen == decContainerLenNil {
if containerLen == containerLenNil {
*vp = nil
} else {
if *vp == nil {
@@ -472,7 +465,7 @@ func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .E
}
}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *Decoder) {
{{/* No need to check if containerLen == decContainerLenNil, as that is checked by R and L above */ -}}
{{/* No need to check if containerLen == containerLenNil, as that is checked by R and L above */ -}}
{{if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
{{else if eq .Elem "bytes" "[]byte" }}mapGet := v != nil && !d.h.MapValueReset
{{end -}}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build notfastpath
@@ -17,11 +17,13 @@ const fastpathEnabled = false
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
// func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
// func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
type fastpathT struct{}
type fastpathE struct {

View File

@@ -1,313 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import "strconv"
// func parseFloat(b []byte, bitsize int) (f float64, err error) {
// if bitsize == 32 {
// return parseFloat32(b)
// } else {
// return parseFloat64(b)
// }
// }
func parseFloat32(b []byte) (f float32, err error) {
return parseFloat32_custom(b)
// return parseFloat32_strconv(b)
}
func parseFloat64(b []byte) (f float64, err error) {
return parseFloat64_custom(b)
// return parseFloat64_strconv(b)
}
func parseFloat32_strconv(b []byte) (f float32, err error) {
f64, err := strconv.ParseFloat(stringView(b), 32)
f = float32(f64)
return
}
func parseFloat64_strconv(b []byte) (f float64, err error) {
return strconv.ParseFloat(stringView(b), 64)
}
// ------ parseFloat custom below --------
// We assume that a lot of floating point numbers in json files will be
// those that are handwritten, and with defined precision (in terms of number
// of digits after decimal point), etc.
//
// We further assume that these can be written in exact format.
//
// strconv.ParseFloat has some unnecessary overhead which we can do without
// for the common case:
//
// - expensive char-by-char check to see if underscores are in the right place
// - testing for and skipping underscores
// - check if the string matches ignorecase +/- inf, +/- infinity, nan
// - support for base 16 (0xFFFF...)
//
// The functions below will try a fast-path for floats which can be decoded
// without any loss of precision, meaning they:
//
// - fit within the significand bits of the 32-bit or 64-bit float
// - exponent fits within the exponent value
// - there is no truncation (any extra numbers are all trailing zeros)
//
// To figure out what the values are for maxMantDigits, use this idea below:
//
// 2^23 = 838 8608 (between 10^ 6 and 10^ 7) (significand bits of uint32)
// 2^32 = 42 9496 7296 (between 10^ 9 and 10^10) (full uint32)
// 2^52 = 4503 5996 2737 0496 (between 10^15 and 10^16) (significand bits of uint64)
// 2^64 = 1844 6744 0737 0955 1616 (between 10^19 and 10^20) (full uint64)
//
// Since we only allow for up to what can comfortably fit into the significand
// ignoring the exponent, and we only try to parse iff significand fits into the
// Exact powers of 10.
var float64pow10 = [...]float64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22,
}
var float32pow10 = [...]float32{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10}
type floatinfo struct {
mantbits uint8
expbits uint8
bias int16
exactPow10 int8 // Exact powers of ten are <= 10^N (32: 10, 64: 22)
exactInts int8 // Exact integers are <= 10^N
maxMantDigits int8 // 10^19 fits in uint64, while 10^9 fits in uint32
}
var fi32 = floatinfo{23, 8, -127, 10, 7, 9} // maxMantDigits = 9
var fi64 = floatinfo{52, 11, -1023, 22, 15, 19} // maxMantDigits = 19
const fMax64 = 1e15
const fMax32 = 1e7
const fBase = 10
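The fast path rests on an IEEE-754 property: when the decimal mantissa fits in the significand and the needed power of ten is itself exact, a single multiplication or division reproduces strconv's result bit for bit. A tiny standalone check of that claim:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "1.25" has mantissa 125 and decimal exponent -2; both 125 and 10^2 are
	// exact in float64, so 125/100 equals what strconv.ParseFloat produces.
	fast := float64(125) / 100
	ref, _ := strconv.ParseFloat("1.25", 64)
	fmt.Println(fast == ref) // true

	// With ~20 significant digits the mantissa no longer fits in the float64
	// significand, so a parser like the one above must fall back to strconv.
	ref2, _ := strconv.ParseFloat("1.2345678901234567891", 64)
	fmt.Println(ref2)
}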
func parseFloatErr(b []byte) error {
return &strconv.NumError{
Func: "ParseFloat",
Err: strconv.ErrSyntax,
Num: string(b),
}
}
func parseFloat32_custom(b []byte) (f float32, err error) {
mantissa, exp, neg, trunc, bad, ok := readFloat(b, fi32)
_ = trunc
if bad {
return 0, parseFloatErr(b)
}
if ok {
// parseFloatDebug(b, 32, false, exp, trunc, ok)
f = float32(mantissa)
if neg {
f = -f
}
if exp != 0 {
indx := fExpIndx(exp)
if exp < 0 { // int / 10^k
f /= float32pow10[indx]
} else { // exp > 0
if exp > fi32.exactPow10 {
f *= float32pow10[exp-fi32.exactPow10]
if f < -fMax32 || f > fMax32 { // exponent too large - outside range
goto FALLBACK
}
indx = uint8(fi32.exactPow10)
}
f *= float32pow10[indx]
}
}
return
}
FALLBACK:
// parseFloatDebug(b, 32, true, exp, trunc, ok)
return parseFloat32_strconv(b)
}
func parseFloat64_custom(b []byte) (f float64, err error) {
mantissa, exp, neg, trunc, bad, ok := readFloat(b, fi64)
_ = trunc
if bad {
return 0, parseFloatErr(b)
}
if ok {
f = float64(mantissa)
if neg {
f = -f
}
if exp != 0 {
indx := fExpIndx(exp)
if exp < 0 { // int / 10^k
f /= float64pow10[indx]
} else { // exp > 0
if exp > fi64.exactPow10 {
f *= float64pow10[exp-fi64.exactPow10]
if f < -fMax64 || f > fMax64 { // exponent too large - outside range
goto FALLBACK
}
indx = uint8(fi64.exactPow10)
}
f *= float64pow10[indx]
}
}
return
}
FALLBACK:
return parseFloat64_strconv(b)
}
func fExpIndx(v int8) uint8 {
if v < 0 {
return uint8(-v)
}
return uint8(v)
}
func readFloat(s []byte, y floatinfo) (mantissa uint64, exp int8, neg, trunc, bad, ok bool) {
var i uint // make it uint, so that we eliminate bounds checking
var slen = uint(len(s))
if slen == 0 {
bad = true
return
}
switch s[0] {
case '+':
i++
case '-':
neg = true
i++
}
// we considered punting early if string has length > maxMantDigits, but this doesn't account
// for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20
// var sawdot, sawdigits, sawexp bool
var sawdot, sawexp bool
var nd, ndMant, dp int8
L:
for ; i < slen; i++ {
switch s[i] {
case '.':
if sawdot {
bad = true
return
}
sawdot = true
dp = nd
case '0':
if nd == 0 { // ignore leading zeros
dp--
continue
}
nd++
if ndMant < y.maxMantDigits {
// mantissa = (mantissa << 1) + (mantissa << 3)
mantissa *= fBase
ndMant++
}
case '1', '2', '3', '4', '5', '6', '7', '8', '9':
// sawdigits = true
nd++
if ndMant < y.maxMantDigits {
// mantissa = (mantissa << 1) + (mantissa << 3) + uint64(s[i]-'0')
mantissa = mantissa*fBase + uint64(s[i]-'0')
// mantissa *= fBase
// mantissa += uint64(s[i] - '0')
ndMant++
} else {
trunc = true
return // break L
}
case 'e', 'E':
sawexp = true
break L
default:
bad = true
return
}
}
// if !sawdigits {
// bad = true
// return
// }
if !sawdot {
dp = nd
}
if sawexp {
i++
if i < slen {
var eneg bool
if s[i] == '+' {
i++
} else if s[i] == '-' {
i++
eneg = true
}
if i < slen {
// for exact match, exponent is 1 or 2 digits (float64: -22 to 37, float32: -1 to 17).
// exit quick if exponent is more than 2 digits.
if i+2 < slen {
return
}
var e int8
if s[i] < '0' || s[i] > '9' {
bad = true
return
}
e = e*fBase + int8(s[i]-'0') // (e << 1) + (e << 3) + int8(s[i]-'0')
i++
if i < slen {
if s[i] < '0' || s[i] > '9' {
bad = true
return
}
e = e*fBase + int8(s[i]-'0') // (e << 1) + (e << 3) + int8(s[i]-'0')
i++
}
if eneg {
dp -= e
} else {
dp += e
}
}
}
}
if mantissa != 0 {
if mantissa>>y.mantbits != 0 {
return
}
exp = dp - ndMant
if exp < -y.exactPow10 || exp > y.exactInts+y.exactPow10 { // cannot handle it
return
}
}
ok = true // && !trunc // if trunc=true, we return early (so here trunc=false)
return
}
// fMul10ShiftU64
// func parseFloatDebug(b []byte, bitsize int, strconv bool, exp int8, trunc, ok bool) {
// if strconv {
// xdebugf("parseFloat%d: delegating: %s, exp: %d, trunc: %v, ok: %v", bitsize, b, exp, trunc, ok)
// } else {
// xdebug2f("parseFloat%d: attempting: %s, exp: %d, trunc: %v, ok: %v", bitsize, b, exp, trunc, ok)
// }
// }

View File

@@ -48,6 +48,6 @@ if {{var "l"}} != 0 {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
} // else len==0: leave as-is (do not clear map entries)
z.DecReadMapEnd()
}
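The practical consequence of the new comment: decoding a zero-length map does not clear entries already present in the destination. A hedged illustration, assuming the github.com/ugorji/go/codec import path and that the reflection path follows the same rule as this generated-code template:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path for this vendored package
)

func main() {
	var h codec.JsonHandle

	dst := map[string]int{"keep": 1} // pre-existing entry in the target map
	in := []byte(`{}`)               // the stream holds an empty (len==0) map

	if err := codec.NewDecoderBytes(in, &h).Decode(&dst); err != nil {
		panic(err)
	}
	// Per the rule above, the zero-length map leaves existing entries untouched.
	fmt.Println(dst["keep"]) // expected: 1
}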

View File

@@ -1,6 +1,6 @@
// comment this out // + build ignore
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
@@ -10,7 +10,7 @@ package codec
import "encoding"
// GenVersion is the current version of codecgen.
const GenVersion = 16
const GenVersion = 17
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
@@ -44,7 +44,7 @@ type genHelperDecDriver struct {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
M must
M mustHdl
F fastpathT
e *Encoder
}
@@ -118,9 +118,6 @@ func (f genHelperEncoder) WriteStr(s string) {
f.e.w().writestr(s)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }
@@ -157,10 +154,10 @@ func (f genHelperDecoder) DecBinary() bool {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) DecScratchBuffer() []byte {
// return f.d.b[:]
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
@@ -170,8 +167,8 @@ func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
rv := rv4i(iv)
if chkPtr {
f.d.ensureDecodeable(rv)
if chkPtr && !isDecodeable(rv) {
f.d.haltAsNotDecodeable(rv)
}
f.d.decodeValue(rv, nil)
}
@@ -193,25 +190,23 @@ func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
if fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()); fnerr != nil {
panic(fnerr)
}
halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeStringAsBytes()
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
if fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()); fnerr != nil {
panic(fnerr)
}
bs := f.d.blist.get(256)[:0]
bs = f.d.d.nextValueBytes(bs)
fnerr := tm.UnmarshalJSON(bs)
f.d.blist.put(bs)
halt.onerror(fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
if fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)); fnerr != nil {
panic(fnerr)
}
halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*

View File

@@ -1,6 +1,6 @@
// comment this out // + build ignore
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
@@ -52,7 +52,7 @@ type genHelperDecDriver struct {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
M must
M mustHdl
F fastpathT
e *Encoder
}
@@ -115,9 +115,10 @@ func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
func (f genHelperEncoder) WriteStr(s string) {
f.e.w().writestr(s)
}
{{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) }
*/ -}}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@@ -145,10 +146,12 @@ func (f genHelperDecoder) DecBinary() bool {
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) DecScratchBuffer() []byte {
// return f.d.b[:]
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
@@ -156,8 +159,8 @@ func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
rv := rv4i(iv)
if chkPtr {
f.d.ensureDecodeable(rv)
if chkPtr && !isDecodeable(rv) {
f.d.haltAsNotDecodeable(rv)
}
f.d.decodeValue(rv, nil)
}
@@ -175,23 +178,21 @@ func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
if fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()); fnerr != nil {
panic(fnerr)
}
halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeStringAsBytes()
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
if fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()); fnerr != nil {
panic(fnerr)
}
bs := f.d.blist.get(256)[:0]
bs = f.d.d.nextValueBytes(bs)
fnerr := tm.UnmarshalJSON(bs)
f.d.blist.put(bs)
halt.onerror(fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
if fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)); fnerr != nil {
panic(fnerr)
}
halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }

View File

@@ -1,6 +1,6 @@
// +build codecgen.exec
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -58,7 +58,7 @@ if {{var "l"}} != 0 {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
} // else len==0: leave as-is (do not clear map entries)
z.DecReadMapEnd()
}
`

View File

@@ -1,6 +1,6 @@
// +build codecgen.exec
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -14,6 +14,7 @@ import (
"io"
"io/ioutil"
"math/rand"
"os"
"reflect"
"regexp"
"sort"
@@ -90,19 +91,19 @@ import (
// GenVersion is the current version of codecgen.
//
// NOTE: Increment this value each time codecgen changes fundamentally.
// MARKER: Increment this value each time codecgen changes fundamentally.
// Fundamental changes are:
// - helper methods change (signature change, new ones added, some removed, etc)
// - codecgen command line changes
//
// v1: Initial Version
// v2:
// v2: -
// v3: Changes for Kubernetes:
// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
// v6: removed unsafe from gen, and now uses codecgen.exec tag
// v7:
// v7: -
// v8: current - we now maintain compatibility with old generated code.
// v9: skipped
// v10: modified encDriver and decDriver interfaces.
@@ -112,10 +113,11 @@ import (
// v14: 20190611 refactored nil handling: TryDecodeAsNil -> selective TryNil, etc
// v15: 20190626 encDriver.EncodeString handles StringToRaw flag inside handle
// v16: 20190629 refactoring for v1.1.6
const genVersion = 16
// v17: 20200911 reduce number of types for which we generate fast path functions (v1.1.8)
const genVersion = 17
const (
genCodecPkg = "codec1978"
genCodecPkg = "codec1978" // MARKER: keep in sync with codecgen/gen.go
genTempVarPfx = "yy"
genTopLevelVarName = "x"
@@ -133,7 +135,7 @@ const (
// genFastpathCanonical configures whether we support Canonical in fast path.
// The savings is not much.
//
// NOTE: This MUST ALWAYS BE TRUE. fast-path.go.tmp doesn't handle it being false.
// MARKER: This MUST ALWAYS BE TRUE. fast-path.go.tmp doesn't handle it being false.
genFastpathCanonical = true // MUST be true
// genFastpathTrimTypes configures whether we trim uncommon fastpath types.
@@ -155,8 +157,9 @@ const (
)
var (
errGenAllTypesSamePkg = errors.New("All types must be in the same package")
errGenExpectArrayOrMap = errors.New("unexpected type. Expecting array/map/slice")
errGenAllTypesSamePkg = errors.New("All types must be in the same package")
errGenExpectArrayOrMap = errors.New("unexpected type - expecting array/map/slice")
errGenUnexpectedTypeFastpath = errors.New("fast-path: unexpected type - requires map or slice")
genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
@@ -275,7 +278,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
for _, t := range typ {
// fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
if genImportPath(t) != x.bp {
panic(errGenAllTypesSamePkg)
halt.onerror(errGenAllTypesSamePkg)
}
x.genRefPkgs(t)
}
@@ -331,10 +334,10 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
}
x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs)
x.linef("codecSelferDecContainerLenNil%s = %d", x.xs, int64(decContainerLenNil))
x.linef("codecSelferDecContainerLenNil%s = %d", x.xs, int64(containerLenNil))
x.line(")")
x.line("var (")
x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = " + "\nerrors.New(`only encoded map or array can be decoded into a struct`)")
x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = " + "errors.New(`only encoded map or array can be decoded into a struct`)")
x.line(")")
x.line("")
@@ -348,7 +351,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
x.linef("if %sGenVersion != %v {", x.cpfx, genVersion)
x.line("_, file, _, _ := runtime.Caller(0)")
x.linef("ver := strconv.FormatInt(int64(%sGenVersion), 10)", x.cpfx)
x.outf(`panic("codecgen version mismatch: current: %v, need " + ver + ". Re-generate file: " + file)`, genVersion)
x.outf(`panic(errors.New("codecgen version mismatch: current: %v, need " + ver + ". Re-generate file: " + file))`, genVersion)
x.linef("}")
if len(imKeys) > 0 {
x.line("if false { // reference the types, but skip this branch at build/run time")
@@ -380,7 +383,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
case reflect.Map:
x.encMapFallback("v", t)
default:
panic(errGenExpectArrayOrMap)
halt.onerror(errGenExpectArrayOrMap)
}
x.line("}")
x.line("")
@@ -395,7 +398,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
case reflect.Map:
x.decMapFallback("v", rtid, t)
default:
panic(errGenExpectArrayOrMap)
halt.onerror(errGenExpectArrayOrMap)
}
x.line("}")
x.line("")
@@ -479,16 +482,12 @@ func (x *genRunner) varsfxreset() {
func (x *genRunner) out(s string) {
_, err := io.WriteString(x.w, s)
if err != nil {
panic(err)
}
genCheckErr(err)
}
func (x *genRunner) outf(s string, params ...interface{}) {
_, err := fmt.Fprintf(x.w, s, params...)
if err != nil {
panic(err)
}
genCheckErr(err)
}
func (x *genRunner) line(s string) {
@@ -512,8 +511,6 @@ func (x *genRunner) linef(s string, params ...interface{}) {
}
func (x *genRunner) genTypeName(t reflect.Type) (n string) {
// defer func() { xdebugf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
// if the type has a PkgPath, which doesn't match the current package,
// then include it.
// We cannot depend on t.String() because it includes current package,
@@ -710,22 +707,22 @@ func (x *genRunner) encVarChkNil(varname string, t reflect.Type, checkNil bool)
telem := t.Elem()
tek := telem.Kind()
if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) {
x.enc(varname, genNonPtr(t))
x.enc(varname, genNonPtr(t), true)
break
}
i := x.varsfx()
x.line(genTempVarPfx + i + " := *" + varname)
x.enc(genTempVarPfx+i, genNonPtr(t))
x.enc(genTempVarPfx+i, genNonPtr(t), false)
case reflect.Struct, reflect.Array:
if t == timeTyp {
x.enc(varname, t)
x.enc(varname, t, false)
break
}
i := x.varsfx()
x.line(genTempVarPfx + i + " := &" + varname)
x.enc(genTempVarPfx+i, t)
x.enc(genTempVarPfx+i, t, true)
default:
x.enc(varname, t)
x.enc(varname, t, false)
}
if checkNil {
@@ -737,7 +734,7 @@ func (x *genRunner) encVarChkNil(varname string, t reflect.Type, checkNil bool)
// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T
// (to prevent copying),
// else t is of type T
func (x *genRunner) enc(varname string, t reflect.Type) {
func (x *genRunner) enc(varname string, t reflect.Type, isptr bool) {
rtid := rt2id(t)
ti2 := x.ti.get(rtid, t)
// We call CodecEncodeSelf if one of the following are honored:
@@ -747,28 +744,55 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
mi := x.varsfx()
// tptr := reflect.PtrTo(t)
tk := t.Kind()
// tk := t.Kind()
// check if
// - type is time.Time, RawExt, Raw
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
var hasIf genIfClause
defer hasIf.end(x) // end if block (if necessary)
var ptrPfx, addrPfx string
if isptr {
ptrPfx = "*"
} else {
addrPfx = "&"
}
if t == timeTyp {
x.linef("%s !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s%s)", hasIf.c(false), ptrPfx, varname)
// return
}
if t == rawTyp {
x.linef("%s z.EncRaw(%s%s)", hasIf.c(true), ptrPfx, varname)
return
}
if t == rawExtTyp {
x.linef("%s r.EncodeRawExt(%s%s)", hasIf.c(true), addrPfx, varname)
return
}
// only check for extensions if extensions are configured,
// and the type is named, and has a packagePath,
// and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. it is not a Selfer)
if !x.nx && varname != genTopLevelVarName && genImportPath(t) != "" && t.Name() != "" {
yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
x.linef("%s %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ",
hasIf.c(false), yy, varname, yy, varname, yy)
}
if x.checkForSelfer(t, varname) {
if tk == reflect.Array ||
(tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T
// if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
if ti2.isFlag(tiflagSelfer) || ti2.isFlag(tiflagSelferPtr) {
x.line(varname + ".CodecEncodeSelf(e)")
return
}
} else { // varname is of type T
if ti2.isFlag(tiflagSelfer) {
x.line(varname + ".CodecEncodeSelf(e)")
return
} else if ti2.isFlag(tiflagSelferPtr) {
x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
return
}
if ti2.isFlag(tiflagSelfer) {
x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname)
return
} else if ti2.isFlag(tiflagSelferPtr) {
x.linef("%s %ssf%s := &%s", hasIf.c(true), genTempVarPfx, mi, varname)
x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
return
}
if _, ok := x.te[rtid]; ok {
x.line(varname + ".CodecEncodeSelf(e)")
x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname)
return
}
}
@@ -791,59 +815,21 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
rtidAdded = true
}
// check if
// - type is time.Time, RawExt, Raw
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
if ti2.isFlag(tiflagBinaryMarshaler) {
x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
} else if ti2.isFlag(tiflagBinaryMarshalerPtr) {
x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
}
if ti2.isFlag(tiflagJsonMarshaler) {
x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
} else if ti2.isFlag(tiflagJsonMarshalerPtr) {
x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
} else if ti2.isFlag(tiflagTextMarshaler) {
x.linef("%s !z.EncBinary() { z.EncTextMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
} else if ti2.isFlag(tiflagTextMarshalerPtr) {
x.linef("%s !z.EncBinary() { z.EncTextMarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
}
var hasIf genIfClause
defer hasIf.end(x) // end if block (if necessary)
if t == timeTyp {
x.linef("%s !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s)", hasIf.c(false), varname)
// return
}
if t == rawTyp {
x.linef("%s z.EncRaw(%s)", hasIf.c(true), varname)
return
}
if t == rawExtTyp {
x.linef("%s r.EncodeRawExt(%s)", hasIf.c(true), varname)
return
}
// only check for extensions if extensions are configured,
// and the type is named, and has a packagePath,
// and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. it is not a Selfer)
var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname if of type *T
if !x.nx && varname != genTopLevelVarName && genImportPath(t) != "" && t.Name() != "" {
yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
x.linef("%s %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ",
hasIf.c(false), yy, varname, yy, varname, yy)
}
if arrayOrStruct { // varname is of type *T
if ti2.isFlag(tiflagBinaryMarshaler) || ti2.isFlag(tiflagBinaryMarshalerPtr) {
x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%v) ", hasIf.c(false), varname)
}
if ti2.isFlag(tiflagJsonMarshaler) || ti2.isFlag(tiflagJsonMarshalerPtr) {
x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", hasIf.c(false), varname)
} else if ti2.isFlag(tiflagTextUnmarshaler) || ti2.isFlag(tiflagTextUnmarshalerPtr) {
x.linef("%s !z.EncBinary() { z.EncTextMarshal(%v) ", hasIf.c(false), varname)
}
} else { // varname is of type T
if ti2.isFlag(tiflagBinaryMarshaler) {
x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%v) ", hasIf.c(false), varname)
} else if ti2.isFlag(tiflagBinaryMarshalerPtr) {
x.linef("%s z.EncBinary() { z.EncBinaryMarshal(&%v) ", hasIf.c(false), varname)
}
if ti2.isFlag(tiflagJsonMarshaler) {
x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", hasIf.c(false), varname)
} else if ti2.isFlag(tiflagJsonMarshalerPtr) {
x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", hasIf.c(false), varname)
} else if ti2.isFlag(tiflagTextMarshaler) {
x.linef("%s !z.EncBinary() { z.EncTextMarshal(%v) ", hasIf.c(false), varname)
} else if ti2.isFlag(tiflagTextMarshalerPtr) {
x.linef("%s !z.EncBinary() { z.EncTextMarshal(&%v) ", hasIf.c(false), varname)
}
}
x.lineIf(hasIf.c(true))
switch t.Kind() {
@@ -866,7 +852,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
x.xtraSM(varname, t, true, true)
case reflect.Slice:
// if nil, call dedicated function
// if a []uint8, call dedicated function
// if a []byte, call dedicated function
// if a known fastpath slice, call dedicated function
// else write encode function in-line.
// - if elements are primitives or Selfers, call dedicated function on each member.
@@ -967,7 +953,7 @@ func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf
}
//buf.s(")")
case reflect.Bool:
buf.s(varname2)
buf.s("bool(").s(varname2).s(")")
case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
buf.s("len(").s(varname2).s(") != 0")
default:
@@ -1178,15 +1164,11 @@ func (x *genRunner) encListFallback(varname string, t reflect.Type) {
Label, Chan, Slice, Sfx string
}
tm, err := template.New("").Parse(genEncChanTmpl)
if err != nil {
panic(err)
}
genCheckErr(err)
x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
x.linef("var sch%s []%s", i, x.genTypeName(t.Elem()))
err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i})
if err != nil {
panic(err)
}
genCheckErr(err)
if elemBytes {
x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i)
x.line("}")
@@ -1209,7 +1191,7 @@ func (x *genRunner) encListFallback(varname string, t reflect.Type) {
func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
x.linef("if %s == nil { r.EncodeNil(); return }", varname)
// NOTE: Canonical Option is not honored
// MARKER: Canonical Option is not honored
i := x.varsfx()
x.line("z.EncWriteMapStart(len(" + varname + "))")
x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
@@ -1344,41 +1326,12 @@ func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, che
}
// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true.
// t is always a basetype (i.e. not of kind reflect.Ptr).
func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
// assumptions:
// - the varname is to a pointer already. No need to take address of it
// - t is always a baseType T (not a *T, etc).
rtid := rt2id(t)
ti2 := x.ti.get(rtid, t)
if x.checkForSelfer(t, varname) {
if ti2.isFlag(tiflagSelfer) || ti2.isFlag(tiflagSelferPtr) {
x.line(varname + ".CodecDecodeSelf(d)")
return
}
if _, ok := x.td[rtid]; ok {
x.line(varname + ".CodecDecodeSelf(d)")
return
}
}
inlist := false
for _, t0 := range x.t {
if t == t0 {
inlist = true
if x.checkForSelfer(t, varname) {
x.line(varname + ".CodecDecodeSelf(d)")
return
}
break
}
}
var rtidAdded bool
if t == x.tc {
x.td[rtid] = true
rtidAdded = true
}
// check if
// - type is time.Time, Raw, RawExt
@@ -1412,18 +1365,58 @@ func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
// only check for extensions if extensions are configured,
// and the type is named, and has a packagePath,
// and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. it is not a Selfer)
// xdebugf("genRunner.dec: varname: %v, t: %v, genImportPath: %v, t.Name: %v", varname, t, genImportPath(t), t.Name())
if !x.nx && varname != genTopLevelVarName && genImportPath(t) != "" && t.Name() != "" {
// first check if extensions are configured, before doing the interface conversion
yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
x.linef("%s %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", hasIf.c(false), yy, varname, yy, varname, yy)
x.linef("%s %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s%s, %s) ", hasIf.c(false), yy, varname, yy, addrPfx, varname, yy)
}
if ti2.isFlag(tiflagBinaryUnmarshaler) || ti2.isFlag(tiflagBinaryUnmarshalerPtr) {
if x.checkForSelfer(t, varname) {
if ti2.isFlag(tiflagSelfer) {
x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
return
}
if ti2.isFlag(tiflagSelferPtr) {
x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
return
}
if _, ok := x.td[rtid]; ok {
x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
return
}
}
inlist := false
for _, t0 := range x.t {
if t == t0 {
inlist = true
if x.checkForSelfer(t, varname) {
x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
return
}
break
}
}
var rtidAdded bool
if t == x.tc {
x.td[rtid] = true
rtidAdded = true
}
if ti2.isFlag(tiflagBinaryUnmarshaler) {
x.linef("%s z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
} else if ti2.isFlag(tiflagBinaryUnmarshalerPtr) {
x.linef("%s z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
}
if ti2.isFlag(tiflagJsonUnmarshaler) || ti2.isFlag(tiflagJsonUnmarshalerPtr) {
if ti2.isFlag(tiflagJsonUnmarshaler) {
x.linef("%s !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", hasIf.c(false), ptrPfx, varname)
} else if ti2.isFlag(tiflagJsonUnmarshalerPtr) {
x.linef("%s !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", hasIf.c(false), addrPfx, varname)
} else if ti2.isFlag(tiflagTextUnmarshaler) || ti2.isFlag(tiflagTextUnmarshalerPtr) {
} else if ti2.isFlag(tiflagTextUnmarshaler) {
x.linef("%s !z.DecBinary() { z.DecTextUnmarshal(%s%v)", hasIf.c(false), ptrPfx, varname)
} else if ti2.isFlag(tiflagTextUnmarshalerPtr) {
x.linef("%s !z.DecBinary() { z.DecTextUnmarshal(%s%v)", hasIf.c(false), addrPfx, varname)
}
@@ -1437,7 +1430,7 @@ func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
case reflect.Array, reflect.Chan:
x.xtraSM(varname, t, false, isptr)
case reflect.Slice:
// if a []uint8, call dedicated function
// if a []byte, call dedicated function
// if a known fastpath slice, call dedicated function
// else write encode function in-line.
// - if elements are primitives or Selfers, call dedicated function on each member.
@@ -1581,12 +1574,8 @@ func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type
return t.Kind() == reflect.Chan
}
tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
if err != nil {
panic(err)
}
if err = tm.Execute(x.w, &ts); err != nil {
panic(err)
}
genCheckErr(err)
genCheckErr(tm.Execute(x.w, &ts))
}
func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
@@ -1635,12 +1624,8 @@ func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type)
}
tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
if err != nil {
panic(err)
}
if err = tm.Execute(x.w, &ts); err != nil {
panic(err)
}
genCheckErr(err)
genCheckErr(tm.Execute(x.w, &ts))
}
func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
@@ -1811,7 +1796,7 @@ func (x *genRunner) newFastpathGenV(t reflect.Type) (v fastpathGenV) {
v.MapKey = x.genTypeName(tk)
v.Size = int(te.Size() + tk.Size())
default:
panic("unexpected type for newFastpathGenV. Requires map or slice type")
halt.onerror(errGenUnexpectedTypeFastpath)
}
return
}
@@ -2090,35 +2075,44 @@ func genInternalDecCommandAsString(s string) string {
case "bool":
return "d.d.DecodeBool()"
default:
panic(errors.New("gen internal: unknown type for decode: " + s))
halt.onerror(errors.New("gen internal: unknown type for decode: " + s))
}
return ""
}
// func genInternalSortType(s string, elem bool) string {
// for _, v := range [...]string{
// "int",
// "uint",
// "float",
// "bool",
// "string",
// "bytes", "[]uint8", "[]byte",
// } {
// if v == "[]byte" || v == "[]uint8" {
// v = "bytes"
// }
// if strings.HasPrefix(s, v) {
// if v == "int" || v == "uint" || v == "float" {
// v += "64"
// }
// if elem {
// return v
// }
// return v + "Slice"
// }
// }
// halt.onerror(errors.New("sorttype: unexpected type: " + s))
// }
func genInternalSortType(s string, elem bool) string {
for _, v := range [...]string{
"int",
"uint",
"float",
"bool",
"string",
"bytes", "[]uint8", "[]byte",
} {
if v == "[]byte" || v == "[]uint8" {
v = "bytes"
}
if strings.HasPrefix(s, v) {
if v == "int" || v == "uint" || v == "float" {
v += "64"
}
if elem {
return v
}
return v + "Slice"
}
if elem {
return s
}
panic("sorttype: unexpected type: " + s)
return s + "Slice"
}
// MARKER: keep in sync with codecgen/gen.go
func genStripVendor(s string) string {
// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
@@ -2191,65 +2185,12 @@ func genInternalInit() {
mapvaltypes = types[:]
if genFastpathTrimTypes {
slicetypes = []string{
"interface{}",
"string",
"[]byte",
"float32",
"float64",
"uint",
// "uint8", // no need for fastpath of []uint8, as it is handled specially
"uint16",
"uint32",
"uint64",
// "uintptr",
"int",
"int8",
"int16",
"int32",
"int64",
"bool",
}
// Note: we only create fast-paths for commonly used types.
// Consequently, things like int8, uint16, uint, etc are commented out.
mapkeytypes = []string{
//"interface{}",
"string",
//"[]byte",
//"float32",
//"float64",
"uint",
"uint8",
//"uint16",
//"uint32",
"uint64",
//"uintptr",
"int",
//"int8",
//"int16",
//"int32",
"int64",
// "bool",
}
mapvaltypes = []string{
"interface{}",
"string",
"[]byte",
"uint",
"uint8",
//"uint16",
//"uint32",
"uint64",
// "uintptr",
"int",
//"int8",
//"int16",
//"int32",
"int64",
"float32",
"float64",
"bool",
}
slicetypes = genInternalFastpathSliceTypes()
mapkeytypes = genInternalFastpathMapKeyTypes()
mapvaltypes = genInternalFastpathMapValueTypes()
}
// var mapkeytypes [len(&types) - 1]string // skip bool
@@ -2303,9 +2244,6 @@ func genInternalInit() {
}
// genInternalGoFile is used to generate source files from templates.
// It is run by the program author alone.
// Unfortunately, it has to be exported so that it can be called from a command line tool.
// *** DO NOT USE ***
func genInternalGoFile(r io.Reader, w io.Writer) (err error) {
genInternalOnce.Do(genInternalInit)
@@ -2337,3 +2275,182 @@ func genInternalGoFile(r io.Reader, w io.Writer) (err error) {
w.Write(bout)
return
}
func genInternalFastpathSliceTypes() []string {
return []string{
"interface{}",
"string",
"[]byte",
// "float32",
"float64",
// "uint",
// "uint8", // no need for fastpath of []uint8, as it is handled specially
// "uint16",
// "uint32",
"uint64",
// "uintptr",
"int",
// "int8",
// "int16",
"int32", // rune
"int64",
"bool",
}
}
func genInternalFastpathMapKeyTypes() []string {
return []string{
// "interface{}",
"string",
// "[]byte",
// "float32",
// "float64",
// "uint",
"uint8",
// "uint16",
// "uint32",
"uint64",
// "uintptr",
"int",
// "int8",
// "int16",
// "int32",
"int64",
// "bool",
}
}
func genInternalFastpathMapValueTypes() []string {
return []string{
"interface{}",
"string",
"[]byte",
// "uint",
"uint8",
// "uint16",
// "uint32",
"uint64",
// "uintptr",
"int",
//"int8",
// "int16",
// "int32", // rune (mostly used for unicode)
"int64",
// "float32",
"float64",
"bool",
}
}
// sort-slice ...
// generates sort implementations for
// various slice types and combination slice+reflect.Value types.
//
// The combination slice+reflect.Value types are used
// during canonical encode, and the others are used during fast-path
// encoding of map keys.
// genInternalSortableTypes returns the types
// that are used for fast-path canonical's encoding of maps.
//
// For now, we only support the highest sizes for
// int64, uint64, float64, bool, string, bytes.
func genInternalSortableTypes() []string {
return genInternalFastpathMapKeyTypes()
}
// genInternalSortablePlusTypes returns the types
// that are used for reflection-based canonical's encoding of maps.
//
// For now, we only support the highest sizes for
// int64, uint64, float64, bool, string, bytes.
func genInternalSortablePlusTypes() []string {
return []string{
"string",
"float64",
"uint64",
// "uintptr",
"int64",
"bool",
"time",
"bytes",
}
}
func genTypeForShortName(s string) string {
switch s {
case "time":
return "time.Time"
case "bytes":
return "[]byte"
}
return s
}
func genArgs(args ...interface{}) map[string]interface{} {
m := make(map[string]interface{}, len(args)/2)
for i := 0; i < len(args); {
m[args[i].(string)] = args[i+1]
i += 2
}
return m
}
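As a quick orientation for the `args` helper that is registered as a template function further below, a minimal throwaway usage sketch (the template text here is invented for this example, not taken from the library's templates):

package main

import (
	"os"
	"text/template"
)

// genArgs, as defined above, pairs alternating key/value arguments into a map.
func genArgs(args ...interface{}) map[string]interface{} {
	m := make(map[string]interface{}, len(args)/2)
	for i := 0; i < len(args); i += 2 {
		m[args[i].(string)] = args[i+1]
	}
	return m
}

func main() {
	t := template.Must(template.New("").
		Funcs(template.FuncMap{"args": genArgs}).
		Parse(`{{ $m := args "Type" "uint64" "Size" 8 }}{{ $m.Type }}/{{ $m.Size }}`))
	_ = t.Execute(os.Stdout, nil) // prints: uint64/8
}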
func genEndsWith(s0 string, sn ...string) bool {
for _, s := range sn {
if strings.HasSuffix(s0, s) {
return true
}
}
return false
}
func genCheckErr(err error) {
halt.onerror(err)
}
func genRunSortTmpl2Go(fnameIn, fnameOut string) {
var err error
funcs := make(template.FuncMap)
funcs["sortables"] = genInternalSortableTypes
funcs["sortablesplus"] = genInternalSortablePlusTypes
funcs["tshort"] = genTypeForShortName
funcs["endswith"] = genEndsWith
funcs["args"] = genArgs
t := template.New("").Funcs(funcs)
fin, err := os.Open(fnameIn)
genCheckErr(err)
defer fin.Close()
fout, err := os.Create(fnameOut)
genCheckErr(err)
defer fout.Close()
tmplstr, err := ioutil.ReadAll(fin)
genCheckErr(err)
t, err = t.Parse(string(tmplstr))
genCheckErr(err)
var out bytes.Buffer
err = t.Execute(&out, 0)
genCheckErr(err)
bout, err := format.Source(out.Bytes())
if err != nil {
fout.Write(out.Bytes()) // write out if error, so we can still see.
}
genCheckErr(err)
// write out if error, as much as possible, so we can still see.
_, err = fout.Write(bout)
genCheckErr(err)
}
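The shape of genRunSortTmpl2Go above (and genRunTmpl2Go just below) is: parse a template, execute it into a buffer, gofmt the result, write it out. A minimal standalone version of that pipeline, using a made-up template string rather than the library's real template files:

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"text/template"
)

func main() {
	const tmpl = `package p
func Hello() string { return   "{{ . }}" }`

	t := template.Must(template.New("").Parse(tmpl))

	var out bytes.Buffer
	if err := t.Execute(&out, "world"); err != nil {
		panic(err)
	}

	// gofmt the generated source, as the generator above does via format.Source.
	src, err := format.Source(out.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(src))
}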
func genRunTmpl2Go(fnameIn, fnameOut string) {
// println("____ " + fnameIn + " --> " + fnameOut + " ______")
fin, err := os.Open(fnameIn)
genCheckErr(err)
defer fin.Close()
fout, err := os.Create(fnameOut)
genCheckErr(err)
defer fout.Close()
err = genInternalGoFile(fin, fout)
genCheckErr(err)
}

View File

@@ -1,5 +1,5 @@
module github.com/ugorji/go/codec
require (
github.com/ugorji/go v1.1.7
)
go 1.11
require github.com/ugorji/go v1.1.10

0
vendor/github.com/ugorji/go/codec/go.sum generated vendored Normal file
View File

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5

View File

@@ -1,14 +1,19 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
import "reflect"
import (
"errors"
"reflect"
)
const reflectArrayOfSupported = false
var errReflectArrayOfUnsupported = errors.New("codec: reflect.ArrayOf unsupported in this go version")
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
panic("codec: reflect.ArrayOf unsupported in this go version")
panic(errReflectArrayOfUnsupported)
}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.9
@@ -8,8 +8,8 @@ package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
if size < 0 {
return reflect.MakeMapWithSize(t, 4)
}
// if size < 0 {
// return reflect.MakeMapWithSize(t, 4)
// }
return reflect.MakeMapWithSize(t, size)
}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.9

View File

@@ -1,8 +1,8 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.12
// +build safe
// +build safe appengine
package codec

View File

@@ -1,8 +1,9 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7
// +build !go1.12
// +build !go1.7 safe
// +build safe appengine
package codec

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.10

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.10

View File

@@ -1,10 +1,12 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.4
package codec
import "errors"
// This codec package will only work for go1.4 and above.
// This is for the following reasons:
// - go 1.4 was released in 2014
@@ -12,6 +14,8 @@ package codec
// - interface only holds pointers
// - reflect.Value is stabilized as 3 words
var errCodecSupportedOnlyFromGo14 = errors.New("codec: go 1.3 and below are not supported")
func init() {
panic("codec: go 1.3 and below are not supported")
panic(errCodecSupportedOnlyFromGo14)
}

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5,!go1.6

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.6,!go1.7

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -128,6 +128,15 @@ package codec
// For example, in json, we have dedicated functions for ReadMapElemKey, etc
// which do not delegate to readDelim, as readDelim takes a parameter.
// The difference in runtime was as much as 5%.
//
// ------------------------------------------
// Handling Nil
// - In dynamic (reflection) mode, decodeValue and encodeValue handle nil at the top
// - Consequently, methods used with them as a parent in the chain e.g. kXXX
// methods do not handle nil.
// - Fastpath methods also do not handle nil.
// The switch called in (en|de)code(...) handles it so the dependent calls don't have to.
// - codecgen will handle nil before calling into the library for further work also.
import (
"bytes"
@@ -138,15 +147,26 @@ import (
"io"
"math"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
)
const (
// containerLenUnknown is length returned from Read(Map|Array)Len
// when a format doesn't know a priori.
// For example, json doesn't pre-determine the length of a container (sequence/map).
containerLenUnknown = -1
// containerLenNil is length returned from Read(Map|Array)Len
// when a 'nil' was encountered in the stream.
containerLenNil = math.MinInt32
// rwNLen is the length of the array for readn or writen calls
rwNLen = 7
@@ -157,9 +177,8 @@ const (
// This constant flag will enable or disable it.
supportMarshalInterfaces = true
// for debugging, set this to false, to catch panic traces.
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
recoverPanicToErr = true
// for debugging, set this to true
bytesFreeListNoCache = false
// arrayCacheLen is the length of the cache used in encoder or decoder for
// allowing zero-alloc initialization.
@@ -172,11 +191,10 @@ const (
wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
wordSize = wordSizeBits / 8
maxArrayLen = 1<<((32<<(^uint(0)>>63))-1) - 1
// so structFieldInfo fits into 8 bytes
maxLevelsEmbedding = 14
// xdebug controls whether xdebugf prints any output
xdebug = true
)
var (
@@ -185,16 +203,32 @@ var (
codecgen bool
panicv panicHdl
must mustHdl
halt panicHdl
refBitset bitset32
isnilBitset bitset32
scalarBitset bitset32
digitCharBitset bitset256
numCharBitset bitset256
whitespaceCharBitset bitset256
numCharWithExpBitset64 bitset64
numCharNoExpBitset64 bitset64
whitespaceCharBitset64 bitset64
)
var (
errMapTypeNotMapKind = errors.New("MapType MUST be of Map Kind")
errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
errExtFnWriteExtUnsupported = errors.New("BytesExt.WriteExt is not supported")
errExtFnReadExtUnsupported = errors.New("BytesExt.ReadExt is not supported")
errExtFnConvertExtUnsupported = errors.New("InterfaceExt.ConvertExt is not supported")
errExtFnUpdateExtUnsupported = errors.New("InterfaceExt.UpdateExt is not supported")
errPanicHdlUndefinedErr = errors.New("panic: undefined error")
)
var pool4tiload = sync.Pool{New: func() interface{} { return new(typeInfoLoadArray) }}
@@ -235,6 +269,26 @@ func init() {
set(byte(reflect.Complex128)).
set(byte(reflect.String))
var i byte
for i = 0; i <= utf8.RuneSelf; i++ {
switch i {
case ' ', '\t', '\r', '\n':
whitespaceCharBitset.set(i)
whitespaceCharBitset64 = whitespaceCharBitset64.set(i)
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
digitCharBitset.set(i)
numCharBitset.set(i)
numCharWithExpBitset64 = numCharWithExpBitset64.set(i - 42)
numCharNoExpBitset64 = numCharNoExpBitset64.set(i)
case '.', '+', '-':
numCharBitset.set(i)
numCharWithExpBitset64 = numCharWithExpBitset64.set(i - 42)
numCharNoExpBitset64 = numCharNoExpBitset64.set(i)
case 'e', 'E':
numCharBitset.set(i)
numCharWithExpBitset64 = numCharWithExpBitset64.set(i - 42)
}
}
}
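A quick standalone check of the `- 42` offset used for numCharWithExpBitset64 in the loop above: the smallest number character is '+' (ASCII 43) and the largest is 'e' (101), so subtracting 42 maps every one of them into 1..59, which fits a single 64-bit bitset:

package main

import "fmt"

func main() {
	for _, c := range []byte("0123456789.+-eE") {
		fmt.Printf("%c -> %d\n", c, c-42) // every value lands in 1..59
	}
}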
type handleFlag uint8
@@ -353,6 +407,33 @@ const (
typeInfoLoadArrayBLen = 8 * 4
)
// fauxUnion is used to keep track of the primitives decoded.
//
// Without it, we would have to decode each primitive and wrap it
// in an interface{}, causing an allocation.
// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
// so we can rest assured that no other decoding happens while these
// primitives are being decoded.
//
// maps and arrays are not handled by this mechanism.
type fauxUnion struct {
// r RawExt // used for RawExt, uint, []byte.
// primitives below
u uint64
i int64
f float64
l []byte
s string
// ---- cpu cache line boundary?
t time.Time
b bool
// state
v valueType
}
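To make the allocation argument in the comment above concrete, a minimal standalone sketch (the names here are invented for illustration, not library code): storing a decoded primitive into a dedicated typed field costs nothing extra, while storing it into an interface{} generally forces the value onto the heap.

package main

// scratch mimics the idea behind fauxUnion: one field per primitive kind.
type scratch struct {
	u uint64
	s string
	b bool
}

func storeTyped(dst *scratch, v uint64) {
	dst.u = v // written straight into the struct field: no allocation
}

func storeBoxed(dst *interface{}, v uint64) {
	*dst = v // boxing into interface{} usually allocates (small interned values aside)
}

func main() {
	var sc scratch
	var box interface{}
	storeTyped(&sc, 1<<20)
	storeBoxed(&box, 1<<20)
	_ = box
}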
// typeInfoLoad is a transient object used while loading up a typeInfo.
type typeInfoLoad struct {
etypes []uintptr
@@ -384,22 +465,11 @@ type isZeroer interface {
type codecError struct {
name string
err interface{}
err error
}
func (e codecError) Cause() error {
switch xerr := e.err.(type) {
case nil:
return nil
case error:
return xerr
case string:
return errors.New(xerr)
case fmt.Stringer:
return errors.New(xerr.String())
default:
return fmt.Errorf("%v", e.err)
}
return e.err
}
func (e codecError) Error() string {
@@ -543,7 +613,9 @@ type MissingFielder interface {
// It returns true if the missing field was set on the struct.
CodecMissingField(field []byte, value interface{}) bool
// CodecMissingFields returns the set of fields which are not struct fields
// CodecMissingFields returns the set of fields which are not struct fields.
//
// Note that the returned map may be mutated by the caller.
CodecMissingFields() map[string]interface{}
}
@@ -690,10 +762,10 @@ func (x *BasicHandle) init(hh Handle) {
atomic.StoreUint32(&x.inited, uint32(f))
// ensure MapType and SliceType are of correct type
if x.MapType != nil && x.MapType.Kind() != reflect.Map {
panic(errMapTypeNotMapKind)
halt.onerror(errMapTypeNotMapKind)
}
if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
panic(errSliceTypeNotSliceKind)
halt.onerror(errSliceTypeNotSliceKind)
}
}
x.mu.Unlock()
@@ -927,9 +999,6 @@ func (x *BasicHandle) fnLoad(rt reflect.Type, rtid uintptr, checkExt bool) (fn *
case reflect.Float64:
fn.fe = (*Encoder).kFloat64
fn.fd = (*Decoder).kFloat64
case reflect.Invalid:
fn.fe = (*Encoder).kInvalid
fn.fd = (*Decoder).kErr
case reflect.Chan:
fi.seq = seqTypeChan
fn.fe = (*Encoder).kChan
@@ -997,6 +1066,8 @@ type Handle interface {
newEncDriver() encDriver
newDecDriver() decDriver
isBinary() bool
// desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood.
desc(bd byte) string
}
// Raw represents raw formatted bytes.
@@ -1068,16 +1139,12 @@ type addExtWrapper struct {
func (x addExtWrapper) WriteExt(v interface{}) []byte {
bs, err := x.encFn(rv4i(v))
if err != nil {
panic(err)
}
halt.onerror(err)
return bs
}
func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
if err := x.decFn(rv4i(v), bs); err != nil {
panic(err)
}
halt.onerror(x.decFn(rv4i(v), bs))
}
func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
@@ -1091,21 +1158,21 @@ func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
type bytesExtFailer struct{}
func (bytesExtFailer) WriteExt(v interface{}) []byte {
panicv.errorstr("BytesExt.WriteExt is not supported")
halt.onerror(errExtFnWriteExtUnsupported)
return nil
}
func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
panicv.errorstr("BytesExt.ReadExt is not supported")
halt.onerror(errExtFnReadExtUnsupported)
}
type interfaceExtFailer struct{}
func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
panicv.errorstr("InterfaceExt.ConvertExt is not supported")
halt.onerror(errExtFnConvertExtUnsupported)
return nil
}
func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
panicv.errorstr("InterfaceExt.UpdateExt is not supported")
halt.onerror(errExtFnUpdateExtUnsupported)
}
type bytesExtWrapper struct {
@@ -1698,7 +1765,7 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
rk := rt.Kind()
if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
}
// do not hold lock while computing this.
@@ -1807,7 +1874,7 @@ func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
// and iteration using equals is faster than maps there
flen := rt.NumField()
if flen > (1<<maxLevelsEmbedding - 1) {
panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
halt.errorf("codec: types with > %v fields are not supported - has %v fields",
(1<<maxLevelsEmbedding - 1), flen)
}
// pv.sfis = make([]structFieldInfo, flen)
@@ -1892,7 +1959,7 @@ LOOP:
}
if f.Name == "" {
panic(errNoFieldNameToStructFieldInfo)
halt.onerror(errNoFieldNameToStructFieldInfo)
}
// pv.fNames = append(pv.fNames, f.Name)
@@ -1918,7 +1985,7 @@ LOOP:
si.flagSet(structFieldInfoFlagReady)
if len(indexstack) > maxLevelsEmbedding-1 {
panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
halt.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
maxLevelsEmbedding-1, len(indexstack))
}
si.nis = uint8(len(indexstack)) + 1
@@ -2022,7 +2089,7 @@ func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray)
n++
}
if n != len(y) {
panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
rt, len(y), len(x), n)
}
@@ -2104,24 +2171,26 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool)
func panicToErr(h errDecorator, err *error) {
// Note: This method MUST be called directly from defer i.e. defer panicToErr ...
// else it seems the recover is not fully handled
if recoverPanicToErr {
if x := recover(); x != nil {
// fmt.Printf("panic'ing with: %v\n", x)
// debug.PrintStack()
panicValToErr(h, x, err)
}
if x := recover(); x != nil {
panicValToErr(h, x, err)
}
}
func isSliceBoundsError(s string) bool {
return strings.Contains(s, "index out of range") ||
strings.Contains(s, "slice bounds out of range")
return strings.Contains(s, " out of range") &&
(strings.Contains(s, "index") || strings.Contains(s, "slice bounds"))
}
func panicValToErr(h errDecorator, v interface{}, err *error) {
d, dok := h.(*Decoder)
switch xerr := v.(type) {
case nil:
case runtime.Error:
d, dok := h.(*Decoder)
if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
*err = io.EOF
} else {
h.wrapErr(xerr, err)
}
case error:
switch xerr {
case nil:
@@ -2129,26 +2198,11 @@ func panicValToErr(h errDecorator, v interface{}, err *error) {
// treat as special (bubble up)
*err = xerr
default:
if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
*err = io.EOF
} else {
h.wrapErr(xerr, err)
}
}
case string:
if xerr != "" {
if dok && d.bytes && isSliceBoundsError(xerr) {
*err = io.EOF
} else {
h.wrapErr(xerr, err)
}
}
case fmt.Stringer:
if xerr != nil {
h.wrapErr(xerr, err)
}
default:
h.wrapErr(v, err)
// we don't expect this to happen (as this library always panics with an error)
h.wrapErr(fmt.Errorf("%v", v), err)
}
}
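The recover-and-convert pattern that panicToErr/panicValToErr implement, reduced to a standalone sketch with no codec types involved: a deferred recover turns a panic carried up the call stack back into an ordinary returned error.

package main

import (
	"errors"
	"fmt"
)

func doWork() {
	panic(errors.New("something went wrong")) // deep inside the call stack
}

// run mirrors the defer-panicToErr idiom above: the deferred closure recovers
// the panic value and stores it into the named error return.
func run() (err error) {
	defer func() {
		if x := recover(); x != nil {
			if e, ok := x.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%v", x)
			}
		}
	}()
	doWork()
	return
}

func main() {
	fmt.Println(run()) // something went wrong
}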
@@ -2159,13 +2213,13 @@ func isImmutableKind(k reflect.Kind) (v bool) {
}
func usableByteSlice(bs []byte, slen int) []byte {
if cap(bs) >= slen {
if bs == nil {
return []byte{}
}
return bs[:slen]
if cap(bs) < slen {
return make([]byte, slen)
}
return make([]byte, slen)
if bs == nil {
return []byte{}
}
return bs[:slen]
}
// ----
@@ -2229,7 +2283,7 @@ func baseRV(v interface{}) (rv reflect.Value) {
type checkOverflow struct{}
// func (checkOverflow) Float16(f float64) (overflow bool) {
// panicv.errorf("unimplemented")
// halt.errorf("unimplemented")
// if f < 0 {
// f = -f
// }
@@ -2243,23 +2297,34 @@ func (checkOverflow) Float32(v float64) (overflow bool) {
return math.MaxFloat32 < v && v <= math.MaxFloat64
}
func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
if bitsize == 0 || bitsize >= 64 || v == 0 {
return
}
if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
// if bitsize == 0 || bitsize >= 64 || v == 0 {
// if v == 0 {
// return
// }
// if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
overflow = true
}
return
}
func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
if bitsize == 0 || bitsize >= 64 || v == 0 {
return
}
if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
// if bitsize == 0 || bitsize >= 64 || v == 0 {
// if v == 0 {
// return
// }
// if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
// overflow = true
// }
if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
overflow = true
}
return
}
func (checkOverflow) Uint2Int(v uint64, neg bool) (overflow bool) {
return (neg && v > 1<<63) || (!neg && v >= 1<<63)
}
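A worked example of the shift round-trip used in Uint/Int above (the real methods also short-circuit v == 0): shifting left then right by 64-bitsize keeps only the low bitsize bits, so any value that changes under the round trip cannot fit.

package main

import "fmt"

func fitsUnsigned(v uint64, bitsize uint8) bool {
	return v == (v<<(64-bitsize))>>(64-bitsize)
}

func main() {
	fmt.Println(fitsUnsigned(255, 8)) // true:  255 survives the round trip
	fmt.Println(fitsUnsigned(256, 8)) // false: 256<<56>>56 == 0, so it overflows 8 bits
}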
func (checkOverflow) SignedInt(v uint64) (overflow bool) {
// e.g. -128 to 127 for int8
pos := (v >> 63) == 0
@@ -2278,25 +2343,25 @@ func (checkOverflow) SignedInt(v uint64) (overflow bool) {
func (x checkOverflow) Float32V(v float64) float64 {
if x.Float32(v) {
panicv.errorf("float32 overflow: %v", v)
halt.errorf("float32 overflow: %v", v)
}
return v
}
func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
if x.Uint(v, bitsize) {
panicv.errorf("uint64 overflow: %v", v)
halt.errorf("uint64 overflow: %v", v)
}
return v
}
func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
if x.Int(v, bitsize) {
panicv.errorf("int64 overflow: %v", v)
halt.errorf("int64 overflow: %v", v)
}
return v
}
func (x checkOverflow) SignedIntV(v uint64) int64 {
if x.SignedInt(v) {
panicv.errorf("uint64 to int64 overflow: %v", v)
halt.errorf("uint64 to int64 overflow: %v", v)
}
return int64(v)
}
@@ -2304,7 +2369,6 @@ func (x checkOverflow) SignedIntV(v uint64) int64 {
// ------------------ FLOATING POINT -----------------
func isNaN64(f float64) bool { return f != f }
func isNaN32(f float32) bool { return f != f }
func abs32(f float32) float32 {
return math.Float32frombits(math.Float32bits(f) &^ (1 << 31))
}
@@ -2343,6 +2407,31 @@ func noFrac32(f float32) (v bool) {
return
}
func isWhitespaceChar(v byte) bool {
// these are in order of speed below ...
return v < 33
// return v < 33 && whitespaceCharBitset64.isset(v)
// return v < 33 && (v == ' ' || v == '\n' || v == '\t' || v == '\r')
// return v == ' ' || v == '\n' || v == '\t' || v == '\r'
// return whitespaceCharBitset.isset(v)
}
func isNumberChar(v byte) bool {
// these are in order of speed below ...
return numCharBitset.isset(v)
// return v < 64 && numCharNoExpBitset64.isset(v) || v == 'e' || v == 'E'
// return v > 42 && v < 102 && numCharWithExpBitset64.isset(v-42)
}
func isDigitChar(v byte) bool {
// these are in order of speed below ...
return digitCharBitset.isset(v)
// return v >= '0' && v <= '9'
}
// func noFrac(f float64) bool {
// _, frac := math.Modf(float64(f))
// return frac == 0
@@ -2451,23 +2540,17 @@ func (s *set) remove(v interface{}) (exists bool) {
// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
type bitset256 [32]byte
// MARKER: we noticed a little performance degradation when using bitset256 as [32]byte.
// Consequently, we are using a [256]bool only for bitset256.
// We decided not to do the same for bitset32 and bitset64 (hence the discrepancy).
func (x *bitset256) check(pos byte) uint8 {
return x[pos>>3] & (1 << (pos & 7))
}
func (x *bitset256) isset(pos byte) bool {
return x.check(pos) != 0
// return x[pos>>3]&(1<<(pos&7)) != 0
}
// func (x *bitset256) issetv(pos byte) byte {
// return x[pos>>3] & (1 << (pos & 7))
// }
type bitset256 [256]bool
func (x *bitset256) set(pos byte) {
x[pos>>3] |= (1 << (pos & 7))
x[pos] = true
}
func (x *bitset256) isset(pos byte) bool {
return x[pos]
}
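For the power-of-two identity in the comment above (which the packed [32]byte layout being replaced relied on): position 201 lives in byte 201>>3 = 25 at bit 201&7 = 1, because 201 = 25*8 + 1. A throwaway check:

package main

import "fmt"

func main() {
	const pos = 201
	fmt.Println(pos/8, pos%8)  // 25 1
	fmt.Println(pos>>3, pos&7) // 25 1: shift and mask give the same byte/bit indices
}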
type bitset32 uint32
@@ -2475,104 +2558,77 @@ type bitset32 uint32
func (x bitset32) set(pos byte) bitset32 {
return x | (1 << pos)
}
func (x bitset32) check(pos byte) uint32 {
return uint32(x) & (1 << pos)
}
func (x bitset32) isset(pos byte) bool {
return x.check(pos) != 0
// return x&(1<<pos) != 0
return uint32(x)&(1<<pos) != 0
}
// func (x *bitset256) unset(pos byte) {
// x[pos>>3] &^= (1 << (pos & 7))
// }
type bitset64 uint64
// type bit2set256 [64]byte
// func (x *bit2set256) set(pos byte, v1, v2 bool) {
// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
// if v1 {
// x[pos>>2] |= 1 << (pos2 + 1)
// }
// if v2 {
// x[pos>>2] |= 1 << pos2
// }
// }
// func (x *bit2set256) get(pos byte) uint8 {
// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
// }
func (x bitset64) set(pos byte) bitset64 {
return x | (1 << pos)
}
func (x bitset64) isset(pos byte) bool {
return uint64(x)&(1<<pos) != 0
}
// ------------
type panicHdl struct{}
func (panicHdl) errorv(err error) {
// errorv will panic if err is defined (not nil)
func (panicHdl) onerror(err error) {
if err != nil {
panic(err)
}
}
func (panicHdl) errorstr(message string) {
if message != "" {
panic(message)
}
}
// errorf will always panic, using the parameters passed.
//go:noinline
func (panicHdl) errorf(format string, params ...interface{}) {
if len(params) != 0 {
panic(fmt.Sprintf(format, params...))
if format == "" {
panic(errPanicHdlUndefinedErr)
}
if len(params) == 0 {
panic(format)
panic(errors.New(format))
}
panic("undefined error")
panic(fmt.Errorf(format, params...))
}
// ----------------------------------------------------
type errDecorator interface {
wrapErr(in interface{}, out *error)
wrapErr(in error, out *error)
}
type errDecoratorDef struct{}
func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) }
func (errDecoratorDef) wrapErr(v error, e *error) { *e = v }
// ----------------------------------------------------
type must struct{}
type mustHdl struct{}
func (must) String(s string, err error) string {
if err != nil {
panicv.errorv(err)
}
func (mustHdl) String(s string, err error) string {
halt.onerror(err)
return s
}
func (must) Int(s int64, err error) int64 {
if err != nil {
panicv.errorv(err)
}
func (mustHdl) Int(s int64, err error) int64 {
halt.onerror(err)
return s
}
func (must) Uint(s uint64, err error) uint64 {
if err != nil {
panicv.errorv(err)
}
func (mustHdl) Uint(s uint64, err error) uint64 {
halt.onerror(err)
return s
}
func (must) Float(s float64, err error) float64 {
if err != nil {
panicv.errorv(err)
}
func (mustHdl) Float(s float64, err error) float64 {
halt.onerror(err)
return s
}
// -------------------
func freelistCapacity(length int) (capacity int) {
for capacity = 8; capacity < length; capacity *= 2 {
for capacity = 8; capacity <= length; capacity *= 2 {
}
return
}
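With the loop condition changed from `<` to `<=`, the returned capacity is always a power of two strictly greater than the requested length. A few sample values as a sanity check:

package main

import "fmt"

func freelistCapacity(length int) (capacity int) {
	for capacity = 8; capacity <= length; capacity *= 2 {
	}
	return
}

func main() {
	for _, n := range []int{0, 7, 8, 100, 1024} {
		fmt.Println(n, freelistCapacity(n)) // 8, 8, 16, 128, 2048
	}
}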
@@ -2580,34 +2636,42 @@ func freelistCapacity(length int) (capacity int) {
type bytesFreelist [][]byte
func (x *bytesFreelist) get(length int) (out []byte) {
if bytesFreeListNoCache {
return make([]byte, length, freelistCapacity(length))
}
y := *x
var j int = -1
for i := 0; i < len(*x); i++ {
if cap((*x)[i]) >= length && (j == -1 || cap((*x)[j]) > cap((*x)[i])) {
for i := range y {
if cap(y[i]) >= length && (j == -1 || cap(y[i]) < cap(y[j])) {
j = i
}
}
if j == -1 {
return make([]byte, length, freelistCapacity(length))
}
out = (*x)[j][:length]
(*x)[j] = nil
for i := 0; i < len(out); i++ {
out = y[j][:length]
y[j] = nil
for i := range out { // memclr/memset
out[i] = 0
}
return
}
func (x *bytesFreelist) put(v []byte) {
if len(v) == 0 {
if bytesFreeListNoCache {
return
}
for i := 0; i < len(*x); i++ {
if cap((*x)[i]) == 0 {
(*x)[i] = v
if cap(v) == 0 {
return
}
y := *x
for i := range y {
if cap(y[i]) == 0 {
y[i] = v
return
}
}
*x = append(*x, v)
*x = append(y, v)
}
func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
@@ -2649,34 +2713,3 @@ func (x *sfiRvFreelist) put(v []sfiRv) {
}
*x = append(*x, v)
}
// -----------
// xdebugf printf. the message in red on the terminal.
// Use it in place of fmt.Printf (which it calls internally)
func xdebugf(pattern string, args ...interface{}) {
xdebugAnyf("31", pattern, args...)
}
// xdebug2f printf. the message in blue on the terminal.
// Use it in place of fmt.Printf (which it calls internally)
func xdebug2f(pattern string, args ...interface{}) {
xdebugAnyf("34", pattern, args...)
}
func xdebugAnyf(colorcode, pattern string, args ...interface{}) {
if !xdebug {
return
}
var delim string
if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' {
delim = "\n"
}
fmt.Printf("\033[1;"+colorcode+"m"+pattern+delim+"\033[0m", args...)
// os.Stderr.Flush()
}
// register these here, so that staticcheck stops barfing
var _ = xdebug2f
var _ = xdebugf
var _ = isNaN32

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -18,27 +18,26 @@ func pruneSignExt(v []byte, pos bool) (n int) {
return
}
// validate that this function is correct ...
// culled from OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
func halfFloatToFloatBits(yy uint16) (d uint32) {
y := uint32(yy)
s := (y >> 15) & 0x01
e := (y >> 10) & 0x1f
m := y & 0x03ff
func halfFloatToFloatBits(h uint16) (f uint32) {
// retrofitted from:
// - OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
s := uint32(h >> 15)
m := uint32(h & 0x03ff)
e := int32((h >> 10) & 0x1f)
if e == 0 {
if m == 0 { // plu or minus 0
if m == 0 { // plus or minus 0
return s << 31
}
// Denormalized number -- renormalize it
for (m & 0x00000400) == 0 {
for (m & 0x0400) == 0 {
m <<= 1
e -= 1
}
e += 1
const zz uint32 = 0x0400
m &= ^zz
m &= ^uint32(0x0400)
} else if e == 31 {
if m == 0 { // Inf
return (s << 31) | 0x7f800000
@@ -47,7 +46,47 @@ func halfFloatToFloatBits(yy uint16) (d uint32) {
}
e = e + (127 - 15)
m = m << 13
return (s << 31) | (e << 23) | m
return (s << 31) | (uint32(e) << 23) | m
}
func floatToHalfFloatBits(i uint32) (h uint16) {
// retrofitted from:
// - OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
// - http://www.java2s.com/example/java-utility-method/float-to/floattohalf-float-f-fae00.html
s := (i >> 16) & 0x8000
e := int32(((i >> 23) & 0xff) - (127 - 15))
m := i & 0x7fffff
var h32 uint32
if e <= 0 {
if e < -10 { // zero
h32 = s // track -0 vs +0
} else {
m = (m | 0x800000) >> uint32(1-e)
h32 = s | (m >> 13)
}
} else if e == 0xff-(127-15) {
if m == 0 { // Inf
h32 = s | 0x7c00
} else { // NAN
m >>= 13
var me uint32
if m == 0 {
me = 1
}
h32 = s | 0x7c00 | m | me
}
} else {
if e > 30 { // Overflow
h32 = s | 0x7c00
} else {
h32 = s | (uint32(e) << 10) | (m >> 13)
}
}
h = uint16(h32)
return
}
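For a concrete feel of the bit layouts these two helpers convert between: 1.5 is 0x3E00 in half precision (sign 0, exponent 15, mantissa 0x200) and 0x3FC00000 as a float32. A throwaway expansion of the normal-number case only, skipping the denormal/Inf/NaN branches handled above:

package main

import (
	"fmt"
	"math"
)

func main() {
	const half uint16 = 0x3E00 // 1.5 in IEEE-754 half precision

	// Mirror the normal-number path above: move the sign to bit 31,
	// rebias the exponent from 15 to 127, widen the mantissa from 10 to 23 bits.
	s := uint32(half>>15) << 31
	e := uint32((half>>10)&0x1f) - 15 + 127
	m := uint32(half&0x03ff) << 13

	fmt.Println(math.Float32frombits(s | e<<23 | m)) // 1.5
}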
// GrowCap will return a new capacity for a slice, given the following:
@@ -60,65 +99,49 @@ func growCap(oldCap, unit, num int) (newCap int) {
// bytes.Buffer model (2*cap + n): much better for bytes.
// smarter way is to take the byte-size of the appended element(type) into account
// maintain 3 thresholds:
// maintain 1 threshold:
// t1: if cap <= t1, newcap = 2x
// t2: if cap <= t2, newcap = 1.75x
// t3: if cap <= t3, newcap = 1.5x
// else newcap = 1.25x
// else newcap = 1.5x
//
// t1, t2, t3 >= 1024 always.
// i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
// t1 is always >= 1024.
// This means that, if unit size >= 16, growth is always 2x or 1.5x.
//
// With this, appending for bytes increase by:
// 100% up to 4K
// 75% up to 8K
// 50% up to 16K
// 25% beyond that
// 50% beyond that
// unit can be 0 e.g. for struct{}{}; handle that appropriately
var t1, t2, t3 int // thresholds
if unit <= 1 {
t1, t2, t3 = 4*1024, 8*1024, 16*1024
} else if unit < 16 {
t3 = 16 / unit * 1024
t1 = t3 * 1 / 4
t2 = t3 * 2 / 4
} else {
t1, t2, t3 = 1024, 1024, 1024
if unit <= 0 {
return maxArrayLen
}
var x int // temporary variable
// handle if num < 0, cap=0, etc.
// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
if oldCap <= t1 { // [0,t1]
x = 8
} else if oldCap > t3 { // (t3,infinity]
x = 5
} else if oldCap <= t2 { // (t1,t2]
x = 7
} else { // (t2,t3]
x = 6
}
newCap = x * oldCap / 4
if num > 0 {
newCap += num
}
if newCap <= oldCap {
newCap = oldCap + 1
var t1 int = 1024 // default thresholds for large values
if unit <= 4 {
t1 = 8 * 1024
} else if unit <= 16 {
t1 = 2 * 1024
}
// ensure newCap is a multiple of 64 (if it is > 64) or 16.
if newCap > 64 {
if x = newCap % 64; x != 0 {
x = newCap / 64
newCap = 64 * (x + 1)
}
} else {
if x = newCap % 16; x != 0 {
x = newCap / 16
newCap = 16 * (x + 1)
}
if oldCap <= 0 {
newCap = 2
} else if oldCap <= t1 { // [0,t1]
newCap = oldCap * 2
} else { // (t1,infinity]
newCap = oldCap * 3 / 2
}
if num > 0 && newCap < num+oldCap {
newCap = num + oldCap
}
// ensure newCap takes multiples of a cache line (size is a multiple of 64)
t1 = newCap * unit
if t2 := t1 % 64; t2 != 0 {
t1 += 64 - t2
newCap = t1 / unit
}
return
}
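Working through the revised rules with concrete numbers (a back-of-the-envelope check, not extra library code): with unit=1 and oldCap=1000, t1 is 8192, so the capacity doubles to 2000 and is then rounded up to the next multiple of 64 bytes, giving 2048; with unit=8 and oldCap=3000, t1 is 2048, so growth is 1.5x to 4500, and rounding the byte size 36000 up to a multiple of 64 yields 36032 bytes, i.e. a final capacity of 4504.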

View File

@@ -1,6 +1,6 @@
// +build !go1.7 safe appengine
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -155,25 +155,25 @@ func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
}
// --------------------------
func (n *decNaked) ru() reflect.Value {
func (n *fauxUnion) ru() reflect.Value {
return rv4i(&n.u).Elem()
}
func (n *decNaked) ri() reflect.Value {
func (n *fauxUnion) ri() reflect.Value {
return rv4i(&n.i).Elem()
}
func (n *decNaked) rf() reflect.Value {
func (n *fauxUnion) rf() reflect.Value {
return rv4i(&n.f).Elem()
}
func (n *decNaked) rl() reflect.Value {
func (n *fauxUnion) rl() reflect.Value {
return rv4i(&n.l).Elem()
}
func (n *decNaked) rs() reflect.Value {
func (n *fauxUnion) rs() reflect.Value {
return rv4i(&n.s).Elem()
}
func (n *decNaked) rt() reflect.Value {
func (n *fauxUnion) rt() reflect.Value {
return rv4i(&n.t).Elem()
}
func (n *decNaked) rb() reflect.Value {
func (n *fauxUnion) rb() reflect.Value {
return rv4i(&n.b).Elem()
}

View File

@@ -2,7 +2,7 @@
// +build !appengine
// +build go1.7
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -15,7 +15,7 @@ import (
)
// This file has unsafe variants of some helper methods.
// NOTE: See helper_not_unsafe.go for the usage information.
// MARKER: See helper_not_unsafe.go for the usage information.
// For reflect.Value code, we decided to do the following:
// - if we know the kind, we can elide conditional checks for
@@ -27,7 +27,7 @@ import (
const safeMode = false
// keep in sync with GO_ROOT/src/reflect/value.go
// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
const (
unsafeFlagIndir = 1 << 7
unsafeFlagAddr = 1 << 8
@@ -73,21 +73,6 @@ func bytesView(v string) []byte {
return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len}))
}
// // isNilRef says whether the interface is a nil reference or not.
// //
// // A reference here is a pointer-sized reference i.e. map, ptr, chan, func, unsafepointer.
// // It is optional to extend this to also check if slices or interfaces are nil also.
// //
// // NOTE: There is no global way of checking if an interface is nil.
// // For true references (map, ptr, func, chan), you can just look
// // at the word of the interface.
// // However, for slices, you have to dereference
// // the word, and get a pointer to the 3-word interface value.
// func isNilRef(v interface{}) (rv reflect.Value, isnil bool) {
// isnil = ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
// return
// }
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
var ui = (*unsafeIntf)(unsafe.Pointer(&v))
if ui.word == nil {
@@ -297,15 +282,15 @@ func (x *atomicClsErr) store(p clsErr) {
// --------------------------
// to create a reflect.Value for each member field of decNaked,
// we first create a global decNaked, and create reflect.Value
// to create a reflect.Value for each member field of fauxUnion,
// we first create a global fauxUnion, and create reflect.Value
// for them all.
// This way, we have the flags and type in the reflect.Value.
// Then, when a reflect.Value is called, we just copy it,
// update the ptr to the decNaked's, and return it.
// update the ptr to the fauxUnion's, and return it.
type unsafeDecNakedWrapper struct {
decNaked
fauxUnion
ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
}
@@ -326,37 +311,37 @@ func init() {
defUnsafeDecNakedWrapper.init()
}
func (n *decNaked) ru() (v reflect.Value) {
func (n *fauxUnion) ru() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.ru
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u)
return
}
func (n *decNaked) ri() (v reflect.Value) {
func (n *fauxUnion) ri() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.ri
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i)
return
}
func (n *decNaked) rf() (v reflect.Value) {
func (n *fauxUnion) rf() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.rf
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f)
return
}
func (n *decNaked) rl() (v reflect.Value) {
func (n *fauxUnion) rl() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.rl
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l)
return
}
func (n *decNaked) rs() (v reflect.Value) {
func (n *fauxUnion) rs() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.rs
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s)
return
}
func (n *decNaked) rt() (v reflect.Value) {
func (n *fauxUnion) rt() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.rt
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t)
return
}
func (n *decNaked) rb() (v reflect.Value) {
func (n *fauxUnion) rb() (v reflect.Value) {
v = defUnsafeDecNakedWrapper.rb
((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b)
return
@@ -843,7 +828,7 @@ func (e *Encoder) jsondriver() *jsonEncDriver {
// ---------- DECODER optimized ---------------
func (d *Decoder) checkBreak() bool {
// jsonDecDriver.CheckBreak() CANNOT be inlined.
// MARKER: jsonDecDriver.CheckBreak() CANNOT be inlined.
// Consequently, there's no benefit in incurring the cost of this
// wrapping function checkBreak.
//

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -87,10 +87,8 @@ var (
// jsonTabs and jsonSpaces are used as caches for indents
jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte
jsonCharHtmlSafeSet bitset256
jsonCharSafeSet bitset256
jsonCharWhitespaceSet bitset256
jsonNumSet bitset256
jsonCharHtmlSafeSet bitset256
jsonCharSafeSet bitset256
)
func init() {
@@ -113,14 +111,6 @@ func init() {
jsonCharHtmlSafeSet.set(i)
}
}
for i = 0; i <= utf8.RuneSelf; i++ {
switch i {
case ' ', '\t', '\r', '\n':
jsonCharWhitespaceSet.set(i)
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-':
jsonNumSet.set(i)
}
}
}
// ----------------
@@ -141,15 +131,13 @@ type jsonEncDriver struct {
typical bool
s *bitset256 // safe set for characters (taking h.HTMLAsIs into consideration)
// scratch: encode time, numbers, etc. Note: leave 1 byte for containerState
b [cacheLineSize + 24]byte // buffer for encoding numbers and time
e Encoder
}
// Keep writeIndent, WriteArrayElem, WriteMapElemKey, WriteMapElemValue
// in jsonEncDriver, so that *Encoder can directly call them
func (e *jsonEncDriver) encoder() *Encoder { return &e.e }
func (e *jsonEncDriver) writeIndent() {
@@ -203,7 +191,7 @@ func (e *jsonEncDriver) EncodeNil() {
// ie if initial token is n.
// e.e.encWr.writeb(jsonLiteralNull)
e.e.encWr.writen([rwNLen]byte{'n', 'u', 'l', 'l'}, 4)
e.e.encWr.writen4('n', 'u', 'l', 'l')
}
func (e *jsonEncDriver) EncodeTime(t time.Time) {
@@ -247,15 +235,19 @@ func (e *jsonEncDriver) EncodeBool(b bool) {
if e.ks && e.e.c == containerMapKey {
if b {
e.e.encWr.writen([rwNLen]byte{'"', 't', 'r', 'u', 'e', '"'}, 6)
e.e.encWr.writen4('"', 't', 'r', 'u')
e.e.encWr.writen2('e', '"')
} else {
e.e.encWr.writen([rwNLen]byte{'"', 'f', 'a', 'l', 's', 'e', '"'}, 7)
e.e.encWr.writen4('"', 'f', 'a', 'l')
e.e.encWr.writen2('s', 'e')
e.e.encWr.writen1('"')
}
} else {
if b {
e.e.encWr.writen([rwNLen]byte{'t', 'r', 'u', 'e'}, 4)
e.e.encWr.writen4('t', 'r', 'u', 'e')
} else {
e.e.encWr.writen([rwNLen]byte{'f', 'a', 'l', 's', 'e'}, 5)
e.e.encWr.writen4('f', 'a', 'l', 's')
e.e.encWr.writen1('e')
}
}
}
@@ -274,11 +266,19 @@ func (e *jsonEncDriver) encodeFloat(f float64, bitsize, fmt byte, prec int8) {
}
func (e *jsonEncDriver) EncodeFloat64(f float64) {
if math.IsNaN(f) || math.IsInf(f, 0) {
e.EncodeNil()
return
}
fmt, prec := jsonFloatStrconvFmtPrec64(f)
e.encodeFloat(f, 64, fmt, prec)
}
func (e *jsonEncDriver) EncodeFloat32(f float32) {
if math.IsNaN(float64(f)) || math.IsInf(float64(f), 0) {
e.EncodeNil()
return
}
fmt, prec := jsonFloatStrconvFmtPrec32(f)
e.encodeFloat(float64(f), 32, fmt, prec)
}
@@ -393,18 +393,18 @@ func (e *jsonEncDriver) quoteStr(s string) {
// if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
// if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) {
b := s[i]
if e.s.isset(b) {
if e.s.isset(s[i]) {
i++
continue
}
if b < utf8.RuneSelf {
// b := s[i]
if s[i] < utf8.RuneSelf {
if start < i {
w.writestr(s[start:i])
}
switch b {
switch s[i] {
case '\\', '"':
w.writen2('\\', b)
w.writen2('\\', s[i])
case '\n':
w.writen2('\\', 'n')
case '\r':
@@ -417,7 +417,7 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen2('\\', 't')
default:
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
w.writen2(hex[s[i]>>4], hex[s[i]&0xF])
}
i++
start = i
@@ -425,15 +425,15 @@ func (e *jsonEncDriver) quoteStr(s string) {
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError {
if size == 1 {
if size == 1 { // meaning invalid encoding (so output as-is)
if start < i {
w.writestr(s[start:i])
}
w.writestr(`\ufffd`)
w.writestr(`\uFFFD`)
i++
start = i
continue
}
continue
}
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
@@ -472,7 +472,7 @@ type jsonDecDriver struct {
h *JsonHandle
tok uint8 // used to store the token read right after skipWhiteSpace
fnil bool // found null
_ bool // found null
_ [2]byte // padding
bstr [4]byte // scratch used for string \UXXX parsing
@@ -486,50 +486,50 @@ type jsonDecDriver struct {
d Decoder
}
// func jsonIsWS(b byte) bool {
// // return b == ' ' || b == '\t' || b == '\r' || b == '\n'
// return jsonCharWhitespaceSet.isset(b)
// }
func (d *jsonDecDriver) decoder() *Decoder {
return &d.d
}
func (d *jsonDecDriver) uncacheRead() {
if d.tok != 0 {
d.d.decRd.unreadn1()
d.tok = 0
}
}
func (d *jsonDecDriver) ReadMapStart() int {
d.advance()
if d.tok == 'n' {
d.readLit4Null()
return decContainerLenNil
return containerLenNil
}
if d.tok != '{' {
d.d.errorf("read map - expect char '%c' but got char '%c'", '{', d.tok)
}
d.tok = 0
return decContainerLenUnknown
return containerLenUnknown
}
func (d *jsonDecDriver) ReadArrayStart() int {
d.advance()
if d.tok == 'n' {
d.readLit4Null()
return decContainerLenNil
return containerLenNil
}
if d.tok != '[' {
d.d.errorf("read array - expect char '%c' but got char '%c'", '[', d.tok)
}
d.tok = 0
return decContainerLenUnknown
return containerLenUnknown
}
// skipWhitespaceForCheckBreak exists so that CheckBreak will be inlined
// and only incur a function call if needed.
//
// MARKER: keep in sync with jsonDecDriver.advance()
//
//go:noinline
func (d *jsonDecDriver) skipWhitespaceForCheckBreak() {
d.tok = d.d.decRd.skipWhitespace()
}
func (d *jsonDecDriver) CheckBreak() bool {
d.advance()
if d.tok == 0 {
d.skipWhitespaceForCheckBreak()
}
return d.tok == '}' || d.tok == ']'
}
@@ -616,16 +616,71 @@ func (d *jsonDecDriver) readLit4Null() {
if jsonValidateSymbols && bs != [rwNLen]byte{'u', 'l', 'l'} { // !Equal jsonLiteral4Null
d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs)
}
d.fnil = true
}
func (d *jsonDecDriver) advance() {
if d.tok == 0 {
d.fnil = false
d.tok = d.d.decRd.skip(&jsonCharWhitespaceSet)
d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset)
}
}
func (d *jsonDecDriver) nextValueBytes(start []byte) (v []byte) {
v = start
consumeString := func() {
for {
c := d.d.decRd.readn1()
v = append(v, c)
if c == '"' {
break
}
if c == '\\' {
v = append(v, d.d.decRd.readn1())
}
}
}
d.advance()
switch d.tok {
default:
v = append(v, d.d.decRd.jsonReadNum()...)
case 'n':
d.readLit4Null()
v = append(v, jsonLiteralNull...)
case 'f':
d.readLit4False()
v = append(v, jsonLiteralFalse...)
case 't':
d.readLit4True()
v = append(v, jsonLiteralTrue...)
case '"':
v = append(v, '"')
consumeString()
case '{', '[':
var elem struct{}
var stack []struct{}
stack = append(stack, elem)
v = append(v, d.tok)
for len(stack) != 0 {
c := d.d.decRd.readn1()
v = append(v, c)
switch c {
case '"':
consumeString()
case '{', '[':
stack = append(stack, elem)
case '}', ']':
stack = stack[:len(stack)-1]
}
}
}
d.tok = 0
return
}
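The '{', '[' arm above tracks nesting depth with a stack so that brackets inside string literals never end the scan early. A simplified standalone sketch of the same idea, working on a plain byte slice and assuming well-formed input (this is not the decoder's actual method):

package main

import "fmt"

// rawJSONValue returns the first complete JSON value in "in", assuming the
// input is well-formed and starts with '{' or '['. Strings are skipped with
// an escape-aware scan so brackets inside them don't affect the depth count.
func rawJSONValue(in []byte) []byte {
	depth := 0
	for i := 0; i < len(in); i++ {
		switch in[i] {
		case '"':
			for i++; in[i] != '"'; i++ {
				if in[i] == '\\' {
					i++ // skip the escaped character
				}
			}
		case '{', '[':
			depth++
		case '}', ']':
			depth--
			if depth == 0 {
				return in[:i+1]
			}
		}
	}
	return in
}

func main() {
	fmt.Printf("%s\n", rawJSONValue([]byte(`{"a":[1,"x]"]} trailing`)))
	// prints: {"a":[1,"x]"]} -- the ']' inside the string does not end the scan
}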
func (d *jsonDecDriver) TryNil() bool {
d.advance()
// we shouldn't try to see if quoted "null" was here, right?
@@ -637,10 +692,6 @@ func (d *jsonDecDriver) TryNil() bool {
return false
}
func (d *jsonDecDriver) Nil() bool {
return d.fnil
}
func (d *jsonDecDriver) DecodeBool() (v bool) {
d.advance()
if d.tok == 'n' {
@@ -675,10 +726,10 @@ func (d *jsonDecDriver) DecodeTime() (t time.Time) {
d.readLit4Null()
return
}
bs := d.readString()
bs := d.readUnescapedString()
t, err := time.Parse(time.RFC3339, stringView(bs))
if err != nil {
d.d.errorv(err)
d.d.onerror(err)
}
return
}
@@ -712,8 +763,7 @@ func (d *jsonDecDriver) decNumBytes() (bs []byte) {
} else if d.tok == 'n' {
d.readLit4Null()
} else {
d.d.decRd.unreadn1()
bs = d.d.decRd.readTo(&jsonNumSet)
bs = d.d.decRd.jsonReadNum()
}
d.tok = 0
return
@@ -724,83 +774,89 @@ func (d *jsonDecDriver) DecodeUint64() (u uint64) {
if len(bs) == 0 {
return
}
n, neg, badsyntax, overflow := jsonParseInteger(bs)
if overflow {
d.d.errorf("overflow parsing unsigned integer: %s", bs)
} else if neg {
d.d.errorf("minus found parsing unsigned integer: %s", bs)
} else if badsyntax {
// fallback: try to decode as float, and cast
n = d.decUint64ViaFloat(bs)
if bs[0] == '-' {
d.d.errorf("negative number cannot be decoded as uint64")
}
return n
}
func (d *jsonDecDriver) DecodeInt64() (i int64) {
const cutoff = uint64(1 << uint(64-1))
bs := d.decNumBytes()
if len(bs) == 0 {
var r readFloatResult
u, r.ok = parseUint64_simple(bs)
if r.ok {
return
}
n, neg, badsyntax, overflow := jsonParseInteger(bs)
if overflow {
d.d.errorf("overflow parsing integer: %s", bs)
} else if badsyntax {
// d.d.errorf("invalid syntax for integer: %s", bs)
// fallback: try to decode as float, and cast
if neg {
n = d.decUint64ViaFloat(bs[1:])
} else {
n = d.decUint64ViaFloat(bs)
r = readFloat(bs, fi64u)
if r.ok {
u, r.bad = parseUint64_reader(r)
if r.bad {
d.d.onerror(strconvParseErr(bs, "ParseUint"))
}
return
}
if neg {
if n > cutoff {
d.d.errorf("overflow parsing integer: %s", bs)
}
i = -(int64(n))
} else {
if n >= cutoff {
d.d.errorf("overflow parsing integer: %s", bs)
}
i = int64(n)
}
d.d.onerror(strconvParseErr(bs, "ParseUint"))
return
}
func (d *jsonDecDriver) decUint64ViaFloat(s []byte) (u uint64) {
if len(s) == 0 {
func (d *jsonDecDriver) DecodeInt64() (v int64) {
b := d.decNumBytes()
if len(b) == 0 {
return
}
f, err := parseFloat64(s)
if err != nil {
d.d.errorf("invalid syntax for integer: %s", s)
var r readFloatResult
var neg bool
if b[0] == '-' {
neg = true
b = b[1:]
}
fi, ff := math.Modf(f)
if ff > 0 {
d.d.errorf("fractional part found parsing integer: %s", s)
} else if fi > float64(math.MaxUint64) {
d.d.errorf("overflow parsing integer: %s", s)
r.mantissa, r.ok = parseUint64_simple(b)
if r.ok {
if chkOvf.Uint2Int(r.mantissa, neg) {
d.d.errorf("overflow decoding number from %s", b)
}
if neg {
v = -int64(r.mantissa)
} else {
v = int64(r.mantissa)
}
return
}
return uint64(fi)
r = readFloat(b, fi64i)
if r.ok {
r.neg = neg
v, r.bad = parseInt64_reader(r)
if r.bad {
d.d.onerror(strconvParseErr(b, "ParseInt"))
}
return
}
d.d.onerror(strconvParseErr(b, "ParseInt"))
return
}
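
The rewritten DecodeInt64 above tries a simple unsigned parse first and only falls back to the float reader for awkward inputs (exponents, overflow). A rough standalone analogue using strconv, with a hypothetical helper name, just to show the two-step shape:

package main

import (
	"fmt"
	"math"
	"strconv"
)

// decodeInt64 mimics the fast-path/fallback split above: plain digits first, then a float parse.
func decodeInt64(s string) (int64, error) {
	if v, err := strconv.ParseInt(s, 10, 64); err == nil {
		return v, nil // fast path: a plain decimal integer
	}
	f, err := strconv.ParseFloat(s, 64) // fallback handles exponents like 1e3
	if err != nil {
		return 0, err
	}
	if f != math.Trunc(f) || f >= 1<<63 || f < -(1<<63) {
		return 0, fmt.Errorf("cannot decode %q as int64", s)
	}
	return int64(f), nil
}

func main() {
	for _, s := range []string{"-42", "1e3", "1.5", "9223372036854775808"} {
		v, err := decodeInt64(s)
		fmt.Println(s, v, err)
	}
}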
func (d *jsonDecDriver) DecodeFloat64() (f float64) {
var err error
if bs := d.decNumBytes(); len(bs) > 0 {
if f, err = parseFloat64(bs); err != nil {
d.d.errorv(err)
}
bs := d.decNumBytes()
if len(bs) == 0 {
return
}
f, err = parseFloat64(bs)
if err != nil {
d.d.onerror(err)
}
return
}
func (d *jsonDecDriver) DecodeFloat32() (f float32) {
var err error
if bs := d.decNumBytes(); len(bs) > 0 {
if f, err = parseFloat32(bs); err != nil {
d.d.errorv(err)
}
bs := d.decNumBytes()
if len(bs) == 0 {
return
}
f, err = parseFloat32(bs)
if err != nil {
d.d.onerror(err)
}
return
}
@@ -831,14 +887,14 @@ func (d *jsonDecDriver) decBytesFromArray(bs []byte) []byte {
}
d.tok = 0
bs = append(bs, uint8(d.DecodeUint64()))
d.tok = d.d.decRd.skip(&jsonCharWhitespaceSet)
d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset)
for d.tok != ']' {
if d.tok != ',' {
d.d.errorf("read array element - expect char '%c' but got char '%c'", ',', d.tok)
}
d.tok = 0
bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
d.tok = d.d.decRd.skip(&jsonCharWhitespaceSet)
d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset)
}
d.tok = 0
return bs
@@ -864,14 +920,13 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
// base64 encodes []byte{} as "", and we encode nil []byte as null.
// Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}.
// appendStringAsBytes returns a zero-len slice for both, so as not to reset d.buf.
// However, it sets a fnil field to true, so we can check if a null was found.
if d.tok == 'n' {
d.readLit4Null()
return nil
}
bs1 := d.readString()
bs1 := d.readUnescapedString()
slen := base64.StdEncoding.DecodedLen(len(bs1))
if slen == 0 {
bsOut = []byte{}
@@ -886,7 +941,6 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
slen2, err := base64.StdEncoding.Decode(bsOut, bs1)
if err != nil {
d.d.errorf("error decoding base64 binary '%s': %v", bs1, err)
return nil
}
if slen != slen2 {
bsOut = bsOut[:slen2]
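
The comment at the top of DecodeBytes spells out the convention: nil []byte encodes as null, empty []byte encodes as "". A standalone check of how encoding/base64 behaves for the empty and non-empty cases the code distinguishes:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	for _, s := range []string{"", "aGVsbG8="} {
		buf := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
		n, err := base64.StdEncoding.Decode(buf, []byte(s))
		// "" yields DecodedLen 0 and an empty (but non-nil) slice; "aGVsbG8=" yields "hello"
		fmt.Printf("%q -> len=%d data=%q err=%v\n", s, n, buf[:n], err)
	}
}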
@@ -896,34 +950,35 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
d.advance()
if d.tok != '"' {
// d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok)
// handle non-string scalar: null, true, false or a number
switch d.tok {
case 'n':
d.readLit4Null()
return []byte{}
case 'f':
d.readLit4False()
return jsonLiteralFalse
case 't':
d.readLit4True()
return jsonLiteralTrue
}
// try to parse a valid number
return d.decNumBytes()
// common case
if d.tok == '"' {
d.appendStringAsBytes()
return d.buf
}
s = d.appendStringAsBytes()
if d.fnil {
return nil
// d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok)
// handle non-string scalar: null, true, false or a number
switch d.tok {
case 'n':
d.readLit4Null()
return nil // []byte{}
case 'f':
d.readLit4False()
return jsonLiteralFalse
case 't':
d.readLit4True()
return jsonLiteralTrue
}
return
// try to parse a valid number
d.tok = 0
return d.d.decRd.jsonReadNum()
}
func (d *jsonDecDriver) readString() (bs []byte) {
func (d *jsonDecDriver) readUnescapedString() (bs []byte) {
if d.tok != '"' {
d.d.errorf("expecting string starting with '\"'; got '%c'", d.tok)
return
}
bs = d.d.decRd.readUntil('"', false)
@@ -931,45 +986,26 @@ func (d *jsonDecDriver) readString() (bs []byte) {
return
}
func (d *jsonDecDriver) appendStringAsBytes() (bs []byte) {
func (d *jsonDecDriver) appendStringAsBytes() {
if d.buf != nil {
d.buf = d.buf[:0]
}
d.tok = 0
// append on each byte seen can be expensive, so we just
// keep track of where we last read a contiguous set of
// non-special bytes (using cursor variable),
// and when we see a special byte
// e.g. end-of-slice, " or \,
// we will append the full range into the v slice before proceeding
var cs = d.d.decRd.readUntil('"', true)
var c uint8
var i, cursor uint
for {
if i >= uint(len(cs)) {
d.buf = append(d.buf, cs[cursor:]...)
cs = d.d.decRd.readUntil('"', true)
i, cursor = 0, 0
continue // this continue helps elide the cs[i] below
}
c = cs[i]
c = d.d.decRd.readn1()
if c == '"' {
break
}
if c != '\\' {
i++
d.buf = append(d.buf, c)
continue
}
d.buf = append(d.buf, cs[cursor:i]...)
i++
if i >= uint(len(cs)) {
d.d.errorf("need at least 1 more bytes for \\ escape sequence")
return // bounds-check elimination
}
c = cs[i]
c = d.d.decRd.readn1()
switch c {
case '"', '\\', '/', '\'':
d.buf = append(d.buf, c)
@@ -984,40 +1020,21 @@ func (d *jsonDecDriver) appendStringAsBytes() (bs []byte) {
case 't':
d.buf = append(d.buf, '\t')
case 'u':
i = d.appendStringAsBytesSlashU(cs, i)
d.appendStringAsBytesSlashU()
default:
d.d.errorf("unsupported escaped value: %c", c)
}
i++
cursor = i
}
if len(cs) > 0 {
if len(d.buf) > 0 && cursor < uint(len(cs)) {
d.buf = append(d.buf, cs[cursor:i]...)
} else {
// if bytes, just return the cs got from readUntil.
// do not do it for io, especially bufio, as the buffer is needed for other things
cs = cs[:i]
if d.d.bytes {
return cs
}
d.buf = d.d.blist.check(d.buf, len(cs))
copy(d.buf, cs)
}
}
return d.buf
}
func (d *jsonDecDriver) appendStringAsBytesSlashU(cs []byte, i uint) uint {
func (d *jsonDecDriver) appendStringAsBytesSlashU() {
var r rune
var rr uint32
var j uint
var c byte
if uint(len(cs)) < i+4 {
d.d.errorf("need at least 4 more bytes for unicode sequence")
return 0 // bounds-check elimination
}
for _, c = range cs[i+1 : i+5] { // bounds-check-elimination
var cs [7]byte
cs = d.d.decRd.readn(4)
for _, c = range cs[:4] { // bounds-check-elimination
// best to use explicit if-else
// - not a table, etc which involve memory loads, array lookup with bounds checks, etc
if c >= '0' && c <= '9' {
@@ -1028,52 +1045,39 @@ func (d *jsonDecDriver) appendStringAsBytesSlashU(cs []byte, i uint) uint {
rr = rr*16 + uint32(c-jsonU4Chk0)
} else {
r = unicode.ReplacementChar
i += 4
goto encode_rune
}
}
r = rune(rr)
i += 4
if utf16.IsSurrogate(r) {
if len(cs) >= int(i+6) {
var cx = cs[i+1:][:6:6] // [:6] affords bounds-check-elimination
//var cx [6]byte
//copy(cx[:], cs[i+1:])
if cx[0] == '\\' && cx[1] == 'u' {
i += 2
var rr1 uint32
for j = 2; j < 6; j++ {
c = cx[j]
if c >= '0' && c <= '9' {
rr = rr*16 + uint32(c-jsonU4Chk2)
} else if c >= 'a' && c <= 'f' {
rr = rr*16 + uint32(c-jsonU4Chk1)
} else if c >= 'A' && c <= 'F' {
rr = rr*16 + uint32(c-jsonU4Chk0)
} else {
r = unicode.ReplacementChar
i += 4
goto encode_rune
}
cs = d.d.decRd.readn(6)
if cs[0] == '\\' && cs[1] == 'u' {
rr = 0
for j = 2; j < 6; j++ {
c = cs[j]
if c >= '0' && c <= '9' {
rr = rr*16 + uint32(c-jsonU4Chk2)
} else if c >= 'a' && c <= 'f' {
rr = rr*16 + uint32(c-jsonU4Chk1)
} else if c >= 'A' && c <= 'F' {
rr = rr*16 + uint32(c-jsonU4Chk0)
} else {
r = unicode.ReplacementChar
goto encode_rune
}
r = utf16.DecodeRune(r, rune(rr1))
i += 4
goto encode_rune
}
r = utf16.DecodeRune(r, rune(rr))
goto encode_rune
}
r = unicode.ReplacementChar
}
encode_rune:
w2 := utf8.EncodeRune(d.bstr[:], r)
d.buf = append(d.buf, d.bstr[:w2]...)
return i
}
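
appendStringAsBytesSlashU now pulls the escape bytes straight off the reader and combines surrogate halves with utf16.DecodeRune. A standalone sketch of the same \uXXXX handling using only the standard library:

package main

import (
	"fmt"
	"strconv"
	"unicode"
	"unicode/utf16"
	"unicode/utf8"
)

// decodeU takes the 4 hex digits of a \uXXXX escape and returns the code unit.
func decodeU(hex string) rune {
	v, err := strconv.ParseUint(hex, 16, 32)
	if err != nil {
		return unicode.ReplacementChar // bad digits: same fallback as the decoder above
	}
	return rune(v)
}

func main() {
	// "\uD83D\uDE00" is a surrogate pair for U+1F600
	r := decodeU("D83D")
	if utf16.IsSurrogate(r) {
		r = utf16.DecodeRune(r, decodeU("DE00")) // invalid pairs come back as U+FFFD
	}
	var buf [4]byte
	n := utf8.EncodeRune(buf[:], r)
	fmt.Printf("%U %q\n", r, buf[:n]) // U+1F600 "😀"
}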
func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
const cutoff = uint64(1 << uint(64-1))
var n uint64
var neg, badsyntax, overflow bool
func (d *jsonDecDriver) nakedNum(z *fauxUnion, bs []byte) (err error) {
// const cutoff = uint64(1 << uint(64-1))
if len(bs) == 0 {
if d.h.PreferFloat {
@@ -1089,33 +1093,12 @@ func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
return
}
if d.h.PreferFloat {
goto F
}
n, neg, badsyntax, overflow = jsonParseInteger(bs)
if badsyntax || overflow {
goto F
}
if neg {
if n > cutoff {
goto F
}
z.v = valueTypeInt
z.i = -(int64(n))
} else if d.h.SignedInteger {
if n >= cutoff {
goto F
}
z.v = valueTypeInt
z.i = int64(n)
z.v = valueTypeFloat
z.f, err = parseFloat64(bs)
} else {
z.v = valueTypeUint
z.u = n
err = parseNumber(bs, z, d.h.SignedInteger)
}
return
F:
z.v = valueTypeFloat
z.f, err = parseFloat64(bs)
return
}
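
nakedNum above has to pick a Go type for an untyped JSON number: float when PreferFloat is set (or the text requires it), otherwise int/uint depending on sign and the SignedInteger option. A rough standalone sketch of that decision, with hypothetical names, built on strconv:

package main

import (
	"fmt"
	"strconv"
)

// nakedNum picks int64, uint64 or float64 for a JSON number literal.
func nakedNum(s string, preferFloat, signedInteger bool) interface{} {
	if !preferFloat {
		if s != "" && s[0] == '-' {
			if i, err := strconv.ParseInt(s, 10, 64); err == nil {
				return i // negative: must be signed
			}
		} else if signedInteger {
			if i, err := strconv.ParseInt(s, 10, 64); err == nil {
				return i
			}
		} else {
			if u, err := strconv.ParseUint(s, 10, 64); err == nil {
				return u
			}
		}
	}
	f, _ := strconv.ParseFloat(s, 64) // fall back to float for fractions, exponents, overflow
	return f
}

func main() {
	fmt.Println(nakedNum("-3", false, false))                    // int64(-3)
	fmt.Println(nakedNum("18446744073709551615", false, false)) // uint64 max
	fmt.Println(nakedNum("2.5", false, false))                   // float64(2.5)
	fmt.Println(nakedNum("7", true, false))                      // float64(7)
}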
func (d *jsonDecDriver) sliceToString(bs []byte) string {
@@ -1148,7 +1131,8 @@ func (d *jsonDecDriver) DecodeNaked() {
z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
case '"':
// if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
bs = d.appendStringAsBytes()
d.appendStringAsBytes()
bs = d.buf
if len(bs) > 0 && d.d.c == containerMapKey && d.h.MapKeyAsString {
if bytes.Equal(bs, jsonLiteralNull) {
z.v = valueTypeNil
@@ -1170,14 +1154,13 @@ func (d *jsonDecDriver) DecodeNaked() {
z.s = d.sliceToString(bs)
}
default: // number
bs = d.decNumBytes()
bs = d.d.decRd.jsonReadNum()
d.tok = 0
if len(bs) == 0 {
d.d.errorf("decode number from empty string")
return
}
if err := d.nakedNum(z, bs); err != nil {
d.d.errorf("decode number from %s: %v", bs, err)
return
}
}
}
@@ -1205,6 +1188,10 @@ func (d *jsonDecDriver) DecodeNaked() {
//
// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
//
// Note also that the float values for NaN, +Inf or -Inf are encoded as null,
// as suggested by NOTE 4 of the ECMA-262 ECMAScript Language Specification 5.1 edition.
// see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf .
type JsonHandle struct {
textEncodingType
BasicHandle
@@ -1266,6 +1253,8 @@ type JsonHandle struct {
// Name returns the name of the handle: json
func (h *JsonHandle) Name() string { return "json" }
func (h *JsonHandle) desc(bd byte) string { return string(bd) }
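
The new doc note above says this handle encodes NaN and ±Inf as null, following the ECMAScript suggestion. The standard library's encoding/json, by contrast, simply refuses such values, which is the behaviour the note is distinguishing itself from:

package main

import (
	"encoding/json"
	"fmt"
	"math"
)

func main() {
	_, err := json.Marshal(math.NaN())
	fmt.Println(err) // json: unsupported value: NaN

	_, err = json.Marshal(math.Inf(1))
	fmt.Println(err) // json: unsupported value: +Inf
}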
// func (h *JsonHandle) hasElemSeparators() bool { return true }
func (h *JsonHandle) typical() bool {
return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
@@ -1312,32 +1301,10 @@ func (d *jsonDecDriver) reset() {
d.se.InterfaceExt = d.h.RawBytesExt
d.buf = d.d.blist.check(d.buf, 256)[:0]
d.tok = 0
d.fnil = false
}
func (d *jsonDecDriver) atEndOfDecode() {}
// jsonFloatStrconvFmtPrec ...
//
// ensure that every float has an 'e' or '.' in it, for easy differentiation from integers.
// this is better/faster than checking if encoded value has [e.] and appending if needed.
// func jsonFloatStrconvFmtPrec(f float64, bits32 bool) (fmt byte, prec int) {
// fmt = 'f'
// prec = -1
// var abs = math.Abs(f)
// if abs == 0 || abs == 1 {
// prec = 1
// } else if !bits32 && (abs < 1e-6 || abs >= 1e21) ||
// bits32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
// fmt = 'e'
// } else if _, frac := math.Modf(abs); frac == 0 {
// // ensure that floats have a .0 at the end, for easy identification as floats
// prec = 1
// }
// return
// }
func jsonFloatStrconvFmtPrec64(f float64) (fmt byte, prec int8) {
fmt = 'f'
prec = -1
@@ -1366,128 +1333,8 @@ func jsonFloatStrconvFmtPrec32(f float32) (fmt byte, prec int8) {
return
}
// custom-fitted version of strconv.Parse(Ui|I)nt.
// Also ensures we don't have to search for .eE to determine if a float or not.
// Note: s CANNOT be a zero-length slice.
func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) {
const maxUint64 = (1<<64 - 1)
const cutoff = maxUint64/10 + 1
if len(s) == 0 { // bounds-check-elimination
// treat empty string as zero value
// badSyntax = true
return
}
switch s[0] {
case '+':
s = s[1:]
case '-':
s = s[1:]
neg = true
}
for _, c := range s {
if c < '0' || c > '9' {
badSyntax = true
return
}
// unsigned integer multiplication overflows silently, so check against the cutoff here
// e.g. (maxUint64-5)*10 would wrap around without any error ...
if n >= cutoff {
overflow = true
return
}
n *= 10
n1 := n + uint64(c-'0')
if n1 < n || n1 > maxUint64 {
overflow = true
return
}
n = n1
}
return
}
var _ decDriverContainerTracker = (*jsonDecDriver)(nil)
var _ encDriverContainerTracker = (*jsonEncDriver)(nil)
var _ decDriver = (*jsonDecDriver)(nil)
var _ encDriver = (*jsonEncDriver)(nil)
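
The var _ lines above are compile-time assertions: assigning a typed nil to a blank interface variable makes the build fail if the type ever stops satisfying the interface. A tiny illustration of the idiom with made-up types:

package main

type decDriver interface {
	DecodeBool() bool
}

type nopDecDriver struct{}

func (nopDecDriver) DecodeBool() bool { return false }

// compile-time check: if nopDecDriver loses DecodeBool, this line stops compiling.
var _ decDriver = nopDecDriver{}

func main() {}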
// ----------------
/*
type jsonEncDriverTypical jsonEncDriver
func (e *jsonEncDriverTypical) WriteArrayStart(length int) {
e.e.encWr.writen1('[')
}
func (e *jsonEncDriverTypical) WriteArrayElem() {
if e.e.c != containerArrayStart {
e.e.encWr.writen1(',')
}
}
func (e *jsonEncDriverTypical) WriteArrayEnd() {
e.e.encWr.writen1(']')
}
func (e *jsonEncDriverTypical) WriteMapStart(length int) {
e.e.encWr.writen1('{')
}
func (e *jsonEncDriverTypical) WriteMapElemKey() {
if e.e.c != containerMapStart {
e.e.encWr.writen1(',')
}
}
func (e *jsonEncDriverTypical) WriteMapElemValue() {
e.e.encWr.writen1(':')
}
func (e *jsonEncDriverTypical) WriteMapEnd() {
e.e.encWr.writen1('}')
}
func (e *jsonEncDriverTypical) EncodeBool(b bool) {
if b {
// e.e.encWr.writeb(jsonLiteralTrue)
e.e.encWr.writen([rwNLen]byte{'t', 'r', 'u', 'e'}, 4)
} else {
// e.e.encWr.writeb(jsonLiteralFalse)
e.e.encWr.writen([rwNLen]byte{'f', 'a', 'l', 's', 'e'}, 5)
}
}
func (e *jsonEncDriverTypical) EncodeInt(v int64) {
e.e.encWr.writeb(strconv.AppendInt(e.b[:0], v, 10))
}
func (e *jsonEncDriverTypical) EncodeUint(v uint64) {
e.e.encWr.writeb(strconv.AppendUint(e.b[:0], v, 10))
}
func (e *jsonEncDriverTypical) EncodeFloat64(f float64) {
fmt, prec := jsonFloatStrconvFmtPrec64(f)
e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), 64))
// e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, jsonFloatStrconvFmtPrec64(f), 64))
}
func (e *jsonEncDriverTypical) EncodeFloat32(f float32) {
fmt, prec := jsonFloatStrconvFmtPrec32(f)
e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], float64(f), fmt, int(prec), 32))
}
// func (e *jsonEncDriverTypical) encodeFloat(f float64, bitsize uint8) {
// fmt, prec := jsonFloatStrconvFmtPrec(f, bitsize == 32)
// e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, int(bitsize)))
// }
// func (e *jsonEncDriverTypical) atEndOfEncode() {
// if e.tw {
// e.e.encWr.writen1(' ')
// }
// }
*/

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth-test.go.tmpl - DO NOT EDIT.
@@ -72,7 +72,8 @@ func doTestMammothSlices(t *testing.T, h Handle) {
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
testUnmarshalErr(rv4i(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
}
}
testReleaseBytes(bs{{$i}})
// ...
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
v{{$i}}v2 = nil
@@ -98,6 +99,7 @@ func doTestMammothSlices(t *testing.T, h Handle) {
testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
v{{$i}}v2 = nil
@@ -108,12 +110,14 @@ func doTestMammothSlices(t *testing.T, h Handle) {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
testReleaseBytes(bs{{$i}})
}
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
v{{$i}}v2 = nil
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
testReleaseBytes(bs{{$i}})
}
{{end}}{{end}}{{end}}
}
@@ -137,10 +141,12 @@ func doTestMammothMaps(t *testing.T, h Handle) {
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
testReleaseBytes(bs{{$i}})
// ...
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
@@ -150,6 +156,7 @@ func doTestMammothMaps(t *testing.T, h Handle) {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
}
}
{{end}}{{end}}{{end}}

View File

@@ -1,6 +1,6 @@
// +build !notfastpath
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
/*
@@ -79,84 +79,57 @@ const (
var mpTimeExtTag int8 = -1
var mpTimeExtTagU = uint8(mpTimeExtTag)
// var mpdesc = map[byte]string{
// mpPosFixNumMin: "PosFixNumMin",
// mpPosFixNumMax: "PosFixNumMax",
// mpFixMapMin: "FixMapMin",
// mpFixMapMax: "FixMapMax",
// mpFixArrayMin: "FixArrayMin",
// mpFixArrayMax: "FixArrayMax",
// mpFixStrMin: "FixStrMin",
// mpFixStrMax: "FixStrMax",
// mpNil: "Nil",
// mpFalse: "False",
// mpTrue: "True",
// mpFloat: "Float",
// mpDouble: "Double",
// mpUint8: "Uint8",
// mpUint16: "Uint16",
// mpUint32: "Uint32",
// mpUint64: "Uint64",
// mpInt8: "Int8",
// mpInt16: "Int16",
// mpInt32: "Int32",
// mpInt64: "Int64",
// mpBin8: "Bin8",
// mpBin16: "Bin16",
// mpBin32: "Bin32",
// mpExt8: "Ext8",
// mpExt16: "Ext16",
// mpExt32: "Ext32",
// mpFixExt1: "FixExt1",
// mpFixExt2: "FixExt2",
// mpFixExt4: "FixExt4",
// mpFixExt8: "FixExt8",
// mpFixExt16: "FixExt16",
// mpStr8: "Str8",
// mpStr16: "Str16",
// mpStr32: "Str32",
// mpArray16: "Array16",
// mpArray32: "Array32",
// mpMap16: "Map16",
// mpMap32: "Map32",
// mpNegFixNumMin: "NegFixNumMin",
// mpNegFixNumMax: "NegFixNumMax",
// }
var mpdescNames = map[byte]string{
mpNil: "nil",
mpFalse: "false",
mpTrue: "true",
mpFloat: "float",
mpDouble: "float",
mpUint8:  "uint",
mpUint16: "uint",
mpUint32: "uint",
mpUint64: "uint",
mpInt8: "int",
mpInt16: "int",
mpInt32: "int",
mpInt64: "int",
func mpdesc(bd byte) string {
switch bd {
case mpNil:
return "nil"
case mpFalse:
return "false"
case mpTrue:
return "true"
case mpFloat, mpDouble:
return "float"
case mpUint8, mpUint16, mpUint32, mpUint64:
return "uint"
case mpInt8, mpInt16, mpInt32, mpInt64:
return "int"
default:
mpStr8: "string|bytes",
mpStr16: "string|bytes",
mpStr32: "string|bytes",
mpBin8: "bytes",
mpBin16: "bytes",
mpBin32: "bytes",
mpArray16: "array",
mpArray32: "array",
mpMap16: "map",
mpMap32: "map",
}
func mpdesc(bd byte) (s string) {
s = mpdescNames[bd]
if s == "" {
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
return "int"
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
return "int"
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
return "string|bytes"
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
return "bytes"
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
return "array"
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
return "map"
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
return "ext"
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax,
bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
s = "int"
case bd >= mpFixStrMin && bd <= mpFixStrMax:
s = "string|bytes"
case bd >= mpFixArrayMin && bd <= mpFixArrayMax:
s = "array"
case bd >= mpFixMapMin && bd <= mpFixMapMax:
s = "map"
case bd >= mpFixExt1 && bd <= mpFixExt16,
bd >= mpExt8 && bd <= mpExt32:
s = "ext"
default:
return "unknown"
s = "unknown"
}
}
return
}
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
@@ -169,8 +142,7 @@ type MsgpackSpecRpcMultiArgs []interface{}
// A MsgpackContainer type specifies the different types of msgpackContainers.
type msgpackContainerType struct {
fixCutoff uint8
bFixMin, b8, b16, b32 byte
fixCutoff, bFixMin, b8, b16, b32 byte
// hasFixMin, has8, has8Always bool
}
@@ -432,7 +404,7 @@ type msgpackDecDriver struct {
// b [scratchByteArrayLen]byte
bd byte
bdRead bool
fnil bool
_ bool
noBuiltInTypes
_ [6]uint64 // padding
d Decoder
@@ -451,7 +423,6 @@ func (d *msgpackDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
d.fnil = false
bd := d.bd
n := d.d.naked()
var decodeFurther bool
@@ -460,7 +431,6 @@ func (d *msgpackDecDriver) DecodeNaked() {
case mpNil:
n.v = valueTypeNil
d.bdRead = false
d.fnil = true
case mpFalse:
n.v = valueTypeBool
n.b = false
@@ -520,7 +490,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
n.l = d.DecodeBytes(nil, false)
}
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
decNakedReadRawBytes(d, &d.d, n, d.h.RawToString)
fauxUnionReadRawBytes(d, &d.d, n, d.h.RawToString)
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
n.v = valueTypeArray
decodeFurther = true
@@ -535,7 +505,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
n.v = valueTypeTime
n.t = d.decodeTime(clen)
} else if d.d.bytes {
n.l = d.d.decRd.readx(uint(clen))
n.l = d.d.decRd.rb.readx(uint(clen))
} else {
n.l = decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, d.d.b[:])
}
@@ -552,6 +522,140 @@ func (d *msgpackDecDriver) DecodeNaked() {
}
}
func (d *msgpackDecDriver) nextValueBytes(start []byte) (v []byte) {
if !d.bdRead {
d.readNextBd()
}
v = append(start, d.bd)
v = d.nextValueBytesBdReadR(v)
d.bdRead = false
return
}
func (d *msgpackDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
d.readNextBd()
v = append(v0, d.bd)
return d.nextValueBytesBdReadR(v)
}
func (d *msgpackDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
v = v0
bd := d.bd
var clen uint
var x []byte
switch bd {
case mpNil, mpFalse, mpTrue: // pass
case mpUint8, mpInt8:
v = append(v, d.d.decRd.readn1())
case mpUint16, mpInt16:
v = append(v, d.d.decRd.readx(2)...)
case mpFloat, mpUint32, mpInt32:
v = append(v, d.d.decRd.readx(4)...)
case mpDouble, mpUint64, mpInt64:
v = append(v, d.d.decRd.readx(8)...)
case mpStr8, mpBin8:
clen = uint(d.d.decRd.readn1())
v = append(v, byte(clen))
v = append(v, d.d.decRd.readx(clen)...)
case mpStr16, mpBin16:
x = d.d.decRd.readx(2)
v = append(v, x...)
clen = uint(bigen.Uint16(x))
v = append(v, d.d.decRd.readx(clen)...)
case mpStr32, mpBin32:
x = d.d.decRd.readx(4)
v = append(v, x...)
clen = uint(bigen.Uint32(x))
v = append(v, d.d.decRd.readx(clen)...)
case mpFixExt1:
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readn1())
case mpFixExt2:
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(2)...)
case mpFixExt4:
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(4)...)
case mpFixExt8:
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(8)...)
case mpFixExt16:
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(16)...)
case mpExt8:
clen = uint(d.d.decRd.readn1())
v = append(v, uint8(clen))
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(clen)...)
case mpExt16:
x = d.d.decRd.readx(2)
clen = uint(bigen.Uint16(x))
v = append(v, x...)
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(clen)...)
case mpExt32:
x = d.d.decRd.readx(4)
clen = uint(bigen.Uint32(x))
v = append(v, x...)
v = append(v, d.d.decRd.readn1()) // tag
v = append(v, d.d.decRd.readx(clen)...)
case mpArray16:
x = d.d.decRd.readx(2)
clen = uint(bigen.Uint16(x))
v = append(v, x...)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
}
case mpArray32:
x = d.d.decRd.readx(4)
clen = uint(bigen.Uint32(x))
v = append(v, x...)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
}
case mpMap16:
x = d.d.decRd.readx(2)
clen = uint(bigen.Uint16(x))
v = append(v, x...)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
}
case mpMap32:
x = d.d.decRd.readx(4)
clen = uint(bigen.Uint32(x))
v = append(v, x...)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
}
default:
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: // pass
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: // pass
case bd >= mpFixStrMin && bd <= mpFixStrMax:
clen = uint(mpFixStrMin ^ bd)
v = append(v, d.d.decRd.readx(clen)...)
case bd >= mpFixArrayMin && bd <= mpFixArrayMax:
clen = uint(mpFixArrayMin ^ bd)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
}
case bd >= mpFixMapMin && bd <= mpFixMapMax:
clen = uint(mpFixMapMin ^ bd)
for i := uint(0); i < clen; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
}
default:
d.d.errorf("nextValueBytes: cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd))
}
}
return
}
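
The default branch above recovers the length of the msgpack fix formats by XOR-ing away the marker bits, e.g. a fixstr byte is 0xa0|len. A standalone sketch of that extraction, assuming the standard msgpack ranges (fixmap 0x80-0x8f, fixarray 0x90-0x9f, fixstr 0xa0-0xbf):

package main

import "fmt"

const (
	mpFixMapMin   = 0x80
	mpFixArrayMin = 0x90
	mpFixStrMin   = 0xa0
)

func fixLen(bd byte) (kind string, n uint) {
	switch {
	case bd >= 0xa0 && bd <= 0xbf:
		return "fixstr", uint(bd ^ mpFixStrMin) // low 5 bits hold the length
	case bd >= 0x90 && bd <= 0x9f:
		return "fixarray", uint(bd ^ mpFixArrayMin) // low 4 bits
	case bd >= 0x80 && bd <= 0x8f:
		return "fixmap", uint(bd ^ mpFixMapMin) // low 4 bits
	}
	return "other", 0
}

func main() {
	for _, bd := range []byte{0xa5, 0x93, 0x82} {
		k, n := fixLen(bd)
		fmt.Printf("0x%x -> %s len %d\n", bd, k, n)
	}
}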
// int can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeInt64() (i int64) {
if d.advanceNil() {
@@ -582,7 +686,6 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) {
i = int64(int8(d.bd))
default:
d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
return
}
}
d.bdRead = false
@@ -608,28 +711,24 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
ui = uint64(i)
} else {
d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt16:
if i := int64(int16(bigen.Uint16(d.d.decRd.readx(2)))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt32:
if i := int64(int32(bigen.Uint32(d.d.decRd.readx(4)))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt64:
if i := int64(bigen.Uint64(d.d.decRd.readx(8))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
return
}
default:
switch {
@@ -637,10 +736,8 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
ui = uint64(d.bd)
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd))
return
default:
d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
return
}
}
d.bdRead = false
@@ -674,7 +771,6 @@ func (d *msgpackDecDriver) DecodeBool() (b bool) {
b = true
} else {
d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
return
}
d.bdRead = false
return
@@ -707,16 +803,14 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
return bs
} else {
d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd)
return
}
d.bdRead = false
if zerocopy {
if d.d.bytes {
return d.d.decRd.readx(uint(clen))
} else if len(bs) == 0 {
bs = d.d.b[:]
}
if d.d.bytes && (zerocopy || d.h.ZeroCopy) {
return d.d.decRd.rb.readx(uint(clen))
}
if zerocopy && len(bs) == 0 {
bs = d.d.b[:]
}
return decByteSlice(d.d.r(), clen, d.h.MaxInitLen, bs)
}
@@ -730,39 +824,24 @@ func (d *msgpackDecDriver) readNextBd() {
d.bdRead = true
}
func (d *msgpackDecDriver) uncacheRead() {
if d.bdRead {
d.d.decRd.unreadn1()
d.bdRead = false
}
}
func (d *msgpackDecDriver) advanceNil() (null bool) {
d.fnil = false
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpNil {
d.bdRead = false
d.fnil = true
null = true
}
return
}
func (d *msgpackDecDriver) Nil() bool {
return d.fnil
}
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
bd := d.bd
d.fnil = false
if bd == mpNil {
d.bdRead = false
d.fnil = true
return valueTypeNil
} else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
return valueTypeBytes
@@ -796,7 +875,6 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int)
clen = int(ct.bFixMin ^ bd)
} else {
d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
return
}
d.bdRead = false
return
@@ -804,14 +882,14 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int)
func (d *msgpackDecDriver) ReadMapStart() int {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
return d.readContainerLen(msgpackContainerMap)
}
func (d *msgpackDecDriver) ReadArrayStart() int {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
return d.readContainerLen(msgpackContainerList)
}
@@ -836,7 +914,6 @@ func (d *msgpackDecDriver) readExtLen() (clen int) {
clen = int(bigen.Uint32(d.d.decRd.readx(4)))
default:
d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
return
}
return
}
@@ -865,7 +942,6 @@ func (d *msgpackDecDriver) DecodeTime() (t time.Time) {
clen = 12
} else {
d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2)
return
}
}
return d.decodeTime(clen)
@@ -886,7 +962,6 @@ func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
t = time.Unix(int64(sec), int64(nsec)).UTC()
default:
d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen)
return
}
return
}
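
decodeTime handles the 4-, 8- and 12-byte msgpack timestamp extension payloads. For the 8-byte form the spec packs nanoseconds into the top 30 bits and seconds into the low 34 bits; a standalone round trip of that packing (assuming that layout):

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	sec, nsec := uint64(1602433032), uint64(123456789)

	// encode: the "timestamp 64" payload is nsec<<34 | sec, big-endian
	var payload [8]byte
	binary.BigEndian.PutUint64(payload[:], nsec<<34|sec)

	// decode: mask the low 34 bits for seconds, shift for nanoseconds
	v := binary.BigEndian.Uint64(payload[:])
	t := time.Unix(int64(v&0x3_ffff_ffff), int64(v>>34)).UTC()
	fmt.Println(t) // 2020-10-11 16:17:12.123456789 +0000 UTC
}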
@@ -894,7 +969,6 @@ func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) {
if xtag > 0xff {
d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
return
}
if d.advanceNil() {
return
@@ -924,10 +998,9 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs
xtag = d.d.decRd.readn1()
if verifyTag && xtag != tag {
d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag)
return
}
if d.d.bytes {
xbs = d.d.decRd.readx(uint(clen))
xbs = d.d.decRd.rb.readx(uint(clen))
} else {
xbs = decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, d.d.b[:])
}
@@ -969,6 +1042,8 @@ type MsgpackHandle struct {
// Name returns the name of the handle: msgpack
func (h *MsgpackHandle) Name() string { return "msgpack" }
func (h *MsgpackHandle) desc(bd byte) string { return mpdesc(bd) }
func (h *MsgpackHandle) newEncDriver() encDriver {
var e = &msgpackEncDriver{h: h}
e.e.e = e
@@ -990,7 +1065,6 @@ func (e *msgpackEncDriver) reset() {
func (d *msgpackDecDriver) reset() {
d.bd, d.bdRead = 0, false
d.fnil = false
}
//--------------------------------------------------
@@ -1071,8 +1145,7 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint
err = c.read(&b)
if err == nil {
if b != expectTypeByte {
err = fmt.Errorf("%s - expecting %v but got %x/%s",
msgBadDesc, expectTypeByte, b, mpdesc(b))
err = fmt.Errorf("%s - expecting %v but got %x/%s", msgBadDesc, expectTypeByte, b, mpdesc(b))
} else {
err = c.read(msgid)
if err == nil {

View File

@@ -1,136 +0,0 @@
// +build prebuild
package main
// prebuild.go generates sort implementations for
// various slice types and combination slice+reflect.Value types.
//
// The combination slice+reflect.Value types are used
// during canonical encode, and the others are used during fast-path
// encoding of map keys.
import (
"bytes"
"go/format"
"io/ioutil"
"os"
"strings"
"text/template"
)
// genInternalSortableTypes returns the types
// that are used for fast-path canonical's encoding of maps.
//
// For now, we only support the highest sizes for
// int64, uint64, float64, bool, string, bytes.
func genInternalSortableTypes() []string {
return []string{
"string",
// "float32",
"float64",
// "uint",
// "uint8",
// "uint16",
// "uint32",
"uint64",
"uintptr",
// "int",
// "int8",
// "int16",
// "int32",
"int64",
"bool",
"time",
"bytes",
}
}
// genInternalSortablePlusTypes returns the types
// that are used for reflection-based canonical's encoding of maps.
//
// For now, we only support the highest sizes for
// int64, uint64, float64, bool, string, bytes.
func genInternalSortablePlusTypes() []string {
return []string{
"string",
"float64",
"uint64",
"uintptr",
"int64",
"bool",
"time",
"bytes",
}
}
func genTypeForShortName(s string) string {
switch s {
case "time":
return "time.Time"
case "bytes":
return "[]byte"
}
return s
}
func genArgs(args ...interface{}) map[string]interface{} {
m := make(map[string]interface{}, len(args)/2)
for i := 0; i < len(args); {
m[args[i].(string)] = args[i+1]
i += 2
}
return m
}
func genEndsWith(s0 string, sn ...string) bool {
for _, s := range sn {
if strings.HasSuffix(s0, s) {
return true
}
}
return false
}
func chkerr(err error) {
if err != nil {
panic(err)
}
}
func run(fnameIn, fnameOut string) {
var err error
funcs := make(template.FuncMap)
funcs["sortables"] = genInternalSortableTypes
funcs["sortablesplus"] = genInternalSortablePlusTypes
funcs["tshort"] = genTypeForShortName
funcs["endswith"] = genEndsWith
funcs["args"] = genArgs
t := template.New("").Funcs(funcs)
fin, err := os.Open(fnameIn)
chkerr(err)
defer fin.Close()
fout, err := os.Create(fnameOut)
chkerr(err)
defer fout.Close()
tmplstr, err := ioutil.ReadAll(fin)
chkerr(err)
t, err = t.Parse(string(tmplstr))
chkerr(err)
var out bytes.Buffer
err = t.Execute(&out, 0)
chkerr(err)
bout, err := format.Source(out.Bytes())
if err != nil {
fout.Write(out.Bytes()) // write out if error, so we can still see.
}
chkerr(err)
// write out if error, as much as possible, so we can still see.
_, err = fout.Write(bout)
chkerr(err)
}
func main() {
run("sort-slice.go.tmpl", "sort-slice.generated.go")
}
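
prebuild.go (deleted here) drives text/template with a FuncMap and then runs the output through go/format, the usual shape for this kind of code generator. A minimal standalone sketch of that pipeline with a made-up template:

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"tshort": func(s string) string {
			if s == "bytes" {
				return "[]byte"
			}
			return s
		},
	}
	t := template.Must(template.New("").Funcs(funcs).Parse(
		"package codec\n\nfunc isEmpty{{.}}(v {{tshort .}}) bool { return len(v) == 0 }\n"))

	var out bytes.Buffer
	if err := t.Execute(&out, "bytes"); err != nil {
		panic(err)
	}
	src, err := format.Source(out.Bytes()) // gofmt the generated code, as run() does
	if err != nil {
		panic(err)
	}
	fmt.Print(string(src))
}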

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -8,23 +8,34 @@ import "io"
// decReader abstracts the reading source, allowing implementations that can
// read from an io.Reader or directly off a byte slice with zero-copying.
type decReader interface {
unreadn1()
// readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
// just return a view of the []byte being decoded from.
// Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
readx(n uint) []byte
readb([]byte)
readn1() uint8
// readn1eof() (v uint8, eof bool)
// read up to 7 bytes at a time
readn(num uint8) (v [rwNLen]byte)
numread() uint // number of bytes read
track()
stopTrack() []byte
// readNumber(includeLastByteRead bool) []byte
// skip any whitespace characters, and return the first non-matching byte
skipWhitespace() (token byte)
// jsonReadNum will include last read byte in first element of slice,
// and continue numeric characters until it sees a non-numeric char
// or EOF. If it sees a non-numeric character, it will unread that.
jsonReadNum() []byte
// skip will skip any byte that matches, and return the first non-matching byte
skip(accept *bitset256) (token byte)
// skip(accept *bitset256) (token byte)
// readTo will read any byte that matches, stopping once no-longer matching.
readTo(accept *bitset256) (out []byte)
// readTo(accept *bitset256) (out []byte)
// readUntil will read, only stopping once it matches the 'stop' byte.
readUntil(stop byte, includeLast bool) (out []byte)
}
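
jsonReadNum, as described in the comment above, keeps consuming numeric characters and unreads the first byte that is not part of the number. A standalone sketch of that contract built on bufio (the real implementations below work against the package's own readers, and isNumberChar here is a local stand-in):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// isNumberChar is a local stand-in for the decoder's own numeric-character test.
func isNumberChar(c byte) bool {
	return (c >= '0' && c <= '9') || c == '.' || c == '+' || c == '-' || c == 'e' || c == 'E'
}

// jsonReadNum reads numeric characters until a non-numeric byte, which it unreads.
func jsonReadNum(r *bufio.Reader) []byte {
	var bs []byte
	for {
		c, err := r.ReadByte()
		if err != nil {
			return bs // EOF: return whatever was collected
		}
		if !isNumberChar(c) {
			r.UnreadByte() // leave the terminator for the caller
			return bs
		}
		bs = append(bs, c)
	}
}

func main() {
	r := bufio.NewReader(strings.NewReader("-12.5e3,true"))
	fmt.Printf("%s\n", jsonReadNum(r)) // -12.5e3
	c, _ := r.ReadByte()
	fmt.Printf("%c\n", c) // , (the unread terminator)
}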
@@ -48,44 +59,31 @@ type ioDecReaderCommon struct {
n uint // num read
l byte // last byte
ls unreadByteStatus // last byte status
trb bool // tracking bytes turned on
_ bool
b [4]byte // tiny buffer for reading single bytes
l byte // last byte
ls unreadByteStatus // last byte status
_ [2]byte
b [4]byte // tiny buffer for reading single bytes
blist *bytesFreelist
tr []byte // buffer for tracking bytes
bufr []byte // buffer for readTo/readUntil
}
func (z *ioDecReaderCommon) last() byte {
return z.l
}
// func (z *ioDecReaderCommon) last() byte {
// return z.l
// }
func (z *ioDecReaderCommon) reset(r io.Reader, blist *bytesFreelist) {
z.blist = blist
z.r = r
z.ls = unreadByteUndefined
z.l, z.n = 0, 0
z.trb = false
}
func (z *ioDecReaderCommon) numread() uint {
return z.n
}
func (z *ioDecReaderCommon) track() {
z.tr = z.blist.check(z.tr, 256)[:0]
z.trb = true
}
func (z *ioDecReaderCommon) stopTrack() (bs []byte) {
z.trb = false
return z.tr
}
// ------------------------------------------
// ioDecReader is a decReader that reads off an io.Reader.
@@ -193,13 +191,9 @@ func (z *ioDecReader) readx(n uint) (bs []byte) {
} else {
bs = make([]byte, n)
}
if _, err := decReadFull(z.r, bs); err != nil {
panic(err)
}
_, err := readFull(z.r, bs)
halt.onerror(err)
z.n += uint(len(bs))
if z.trb {
z.tr = append(z.tr, bs...)
}
return
}
@@ -207,77 +201,59 @@ func (z *ioDecReader) readb(bs []byte) {
if len(bs) == 0 {
return
}
if _, err := decReadFull(z.r, bs); err != nil {
panic(err)
}
_, err := readFull(z.r, bs)
halt.onerror(err)
z.n += uint(len(bs))
if z.trb {
z.tr = append(z.tr, bs...)
}
}
func (z *ioDecReader) readn1() (b uint8) {
b, err := z.ReadByte()
halt.onerror(err)
z.n++
return
}
func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
b, err := z.ReadByte()
if err == nil {
z.n++
if z.trb {
z.tr = append(z.tr, b)
}
} else if err == io.EOF {
eof = true
} else {
panic(err)
halt.onerror(err)
}
return
}
func (z *ioDecReader) readn1() (b uint8) {
b, err := z.ReadByte()
if err == nil {
z.n++
if z.trb {
z.tr = append(z.tr, b)
}
return
}
panic(err)
}
func (z *ioDecReader) skip(accept *bitset256) (token byte) {
var eof bool
LOOP:
token, eof = z.readn1eof()
if eof {
return
}
if accept.isset(token) {
goto LOOP
}
return
}
func (z *ioDecReader) readTo(accept *bitset256) []byte {
func (z *ioDecReader) jsonReadNum() (bs []byte) {
z.unreadn1()
z.bufr = z.blist.check(z.bufr, 256)[:0]
LOOP:
token, eof := z.readn1eof()
i, eof := z.readn1eof()
if eof {
return z.bufr
}
if accept.isset(token) {
z.bufr = append(z.bufr, token)
if isNumberChar(i) {
z.bufr = append(z.bufr, i)
goto LOOP
}
z.unreadn1()
return z.bufr
}
func (z *ioDecReader) skipWhitespace() (token byte) {
LOOP:
token = z.readn1()
if isWhitespaceChar(token) {
goto LOOP
}
return
}
func (z *ioDecReader) readUntil(stop byte, includeLast bool) []byte {
z.bufr = z.blist.check(z.bufr, 256)[:0]
LOOP:
token, eof := z.readn1eof()
if eof {
panic(io.EOF)
}
token := z.readn1()
z.bufr = append(z.bufr, token)
if token == stop {
if includeLast {
@@ -288,18 +264,10 @@ LOOP:
goto LOOP
}
//go:noinline
func (z *ioDecReader) unreadn1() {
err := z.UnreadByte()
if err != nil {
panic(err)
}
halt.onerror(err)
z.n--
if z.trb {
if l := len(z.tr) - 1; l >= 0 {
z.tr = z.tr[:l]
}
}
}
// ------------------------------------
@@ -324,24 +292,28 @@ func (z *bufioDecReader) readb(p []byte) {
var n = uint(copy(p, z.buf[z.c:]))
z.n += n
z.c += n
if len(p) == int(n) {
if z.trb {
z.tr = append(z.tr, p...)
}
} else {
z.readbFill(p, n)
if len(p) != int(n) {
z.readbFill(p, n, true, false)
}
}
func (z *bufioDecReader) readbFill(p0 []byte, n uint) {
func (z *bufioDecReader) readbFill(p0 []byte, n uint, must bool, eof bool) (isEOF bool, err error) {
// at this point, there's nothing in z.buf to read (z.buf is fully consumed)
p := p0[n:]
var p []byte
if p0 != nil {
p = p0[n:]
}
var n2 uint
var err error
if len(p) > cap(z.buf) {
n2, err = decReadFull(z.r, p)
n2, err = readFull(z.r, p)
if err != nil {
panic(err)
if err == io.EOF {
isEOF = true
}
if must && !(eof && isEOF) {
halt.onerror(err)
}
return
}
n += n2
z.n += n2
@@ -349,57 +321,69 @@ func (z *bufioDecReader) readbFill(p0 []byte, n uint) {
z.buf = z.buf[:1]
z.buf[0] = p[len(p)-1]
z.c = 1
if z.trb {
z.tr = append(z.tr, p0[:n]...)
}
return
}
// z.c is now 0, and len(p) <= cap(z.buf)
var n1 int
LOOP:
// for len(p) > 0 && z.err == nil {
if len(p) > 0 {
z.buf = z.buf[0:cap(z.buf)]
var n1 int
n1, err = z.r.Read(z.buf)
n2 = uint(n1)
if n2 == 0 && err != nil {
panic(err)
z.buf = z.buf[0:cap(z.buf)]
n1, err = z.r.Read(z.buf)
n2 = uint(n1)
if n2 == 0 && err != nil {
if err == io.EOF {
isEOF = true
}
z.buf = z.buf[:n2]
if must && !(eof && isEOF) {
halt.onerror(err)
}
return
}
err = nil
z.buf = z.buf[:n2]
z.c = 0
if len(p) > 0 {
n2 = uint(copy(p, z.buf))
z.c = n2
n += n2
z.n += n2
p = p[n2:]
goto LOOP
}
if z.c == 0 {
z.buf = z.buf[:1]
z.buf[0] = p[len(p)-1]
z.c = 1
}
if z.trb {
z.tr = append(z.tr, p0[:n]...)
if len(p) > 0 {
goto LOOP
}
if z.c == 0 {
z.buf = z.buf[:1]
z.buf[0] = p[len(p)-1]
z.c = 1
}
}
return
}
func (z *bufioDecReader) last() byte {
return z.buf[z.c-1]
}
// func (z *bufioDecReader) last() byte {
// return z.buf[z.c-1]
// }
func (z *bufioDecReader) readn1() (b byte) {
// fast-path, so we elide calling into Read() most of the time
if z.c < uint(len(z.buf)) {
b = z.buf[z.c]
z.c++
z.n++
if z.trb {
z.tr = append(z.tr, b)
}
} else { // meaning z.c == len(z.buf) or greater ... so need to fill
z.readbFill(z.b[:1], 0)
b = z.b[0]
if z.c >= uint(len(z.buf)) {
z.readbFill(nil, 0, true, false)
}
b = z.buf[z.c]
z.c++
z.n++
return
}
func (z *bufioDecReader) readn1eof() (b byte, eof bool) {
if z.c >= uint(len(z.buf)) {
eof, _ = z.readbFill(nil, 0, true, true)
if eof {
return
}
}
b = z.buf[z.c]
z.c++
z.n++
return
}
@@ -409,9 +393,6 @@ func (z *bufioDecReader) unreadn1() {
}
z.c--
z.n--
if z.trb {
z.tr = z.tr[:len(z.tr)-1]
}
}
func (z *bufioDecReader) readn(num uint8) (bs [rwNLen]byte) {
@@ -427,135 +408,77 @@ func (z *bufioDecReader) readx(n uint) (bs []byte) {
bs = z.buf[z.c : z.c+n]
z.n += n
z.c += n
if z.trb {
z.tr = append(z.tr, bs...)
}
} else {
bs = make([]byte, n)
// n no longer used - can reuse
n = uint(copy(bs, z.buf[z.c:]))
z.n += n
z.c += n
z.readbFill(bs, n)
z.readbFill(bs, n, true, false)
}
return
}
func (z *bufioDecReader) skip(accept *bitset256) (token byte) {
func (z *bufioDecReader) jsonReadNum() (bs []byte) {
z.unreadn1()
z.bufr = z.blist.check(z.bufr, 256)[:0]
LOOP:
i, eof := z.readn1eof()
if eof {
return z.bufr
}
if isNumberChar(i) {
z.bufr = append(z.bufr, i)
goto LOOP
}
z.unreadn1()
return z.bufr
}
func (z *bufioDecReader) skipWhitespace() (token byte) {
i := z.c
LOOP:
if i < uint(len(z.buf)) {
// inline z.skipLoopFn(i) and refactor, so cost is within inline budget
token = z.buf[i]
i++
if accept.isset(token) {
if isWhitespaceChar(token) {
goto LOOP
}
z.n += i - 2 - z.c
if z.trb {
z.tr = append(z.tr, z.buf[z.c:i]...) // z.doTrack(i)
}
z.c = i
return
}
return z.skipFill(accept)
return z.skipFillWhitespace()
}
func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) {
func (z *bufioDecReader) skipFillWhitespace() (token byte) {
z.n += uint(len(z.buf)) - z.c
if z.trb {
z.tr = append(z.tr, z.buf[z.c:]...)
}
var i, n2 int
var err error
for {
z.c = 0
z.buf = z.buf[0:cap(z.buf)]
n2, err = z.r.Read(z.buf)
if n2 == 0 && err != nil {
panic(err)
if n2 == 0 {
halt.onerror(err)
}
z.buf = z.buf[:n2]
for i, token = range z.buf {
// if !accept.isset(token) {
if accept.check(token) == 0 {
if !isWhitespaceChar(token) {
z.n += (uint(i) - z.c) - 1
z.loopFn(uint(i + 1))
return
}
}
z.n += uint(n2)
if z.trb {
z.tr = append(z.tr, z.buf...)
}
}
}
func (z *bufioDecReader) loopFn(i uint) {
if z.trb {
z.tr = append(z.tr, z.buf[z.c:i]...) // z.doTrack(i)
}
z.c = i
}
func (z *bufioDecReader) readTo(accept *bitset256) (out []byte) {
i := z.c
LOOP:
if i < uint(len(z.buf)) {
// if !accept.isset(z.buf[i]) {
if accept.check(z.buf[i]) == 0 {
// inline readToLoopFn here (for performance)
z.n += (i - z.c) - 1
out = z.buf[z.c:i]
if z.trb {
z.tr = append(z.tr, z.buf[z.c:i]...) // z.doTrack(i)
}
z.c = i
return
}
i++
goto LOOP
}
return z.readToFill(accept)
}
func (z *bufioDecReader) readToFill(accept *bitset256) []byte {
z.bufr = z.blist.check(z.bufr, 256)[:0]
z.n += uint(len(z.buf)) - z.c
z.bufr = append(z.bufr, z.buf[z.c:]...)
if z.trb {
z.tr = append(z.tr, z.buf[z.c:]...)
}
var n2 int
var err error
for {
z.c = 0
z.buf = z.buf[:cap(z.buf)]
n2, err = z.r.Read(z.buf)
if n2 == 0 && err != nil {
if err == io.EOF {
return z.bufr // readTo should read until it matches or end is reached
}
panic(err)
}
z.buf = z.buf[:n2]
for i, token := range z.buf {
// if !accept.isset(token) {
if accept.check(token) == 0 {
z.n += (uint(i) - z.c) - 1
z.bufr = append(z.bufr, z.buf[z.c:i]...)
z.loopFn(uint(i))
return z.bufr
}
}
z.bufr = append(z.bufr, z.buf...)
z.n += uint(n2)
if z.trb {
z.tr = append(z.tr, z.buf...)
}
}
}
func (z *bufioDecReader) readUntil(stop byte, includeLast bool) (out []byte) {
i := z.c
LOOP:
@@ -564,9 +487,6 @@ LOOP:
z.n += (i - z.c) - 1
i++
out = z.buf[z.c:i]
if z.trb {
z.tr = append(z.tr, z.buf[z.c:i]...) // z.doTrack(i)
}
z.c = i
goto FINISH
}
@@ -585,15 +505,12 @@ func (z *bufioDecReader) readUntilFill(stop byte) []byte {
z.bufr = z.blist.check(z.bufr, 256)[:0]
z.n += uint(len(z.buf)) - z.c
z.bufr = append(z.bufr, z.buf[z.c:]...)
if z.trb {
z.tr = append(z.tr, z.buf[z.c:]...)
}
for {
z.c = 0
z.buf = z.buf[0:cap(z.buf)]
n1, err := z.r.Read(z.buf)
if n1 == 0 && err != nil {
panic(err)
if n1 == 0 {
halt.onerror(err)
}
n2 := uint(n1)
z.buf = z.buf[:n2]
@@ -607,42 +524,44 @@ func (z *bufioDecReader) readUntilFill(stop byte) []byte {
}
z.bufr = append(z.bufr, z.buf...)
z.n += n2
if z.trb {
z.tr = append(z.tr, z.buf...)
}
}
}
// ------------------------------------
// bytesDecReader is a decReader that reads off a byte slice with zero copying
//
// Note: we do not try to convert indexing out of bounds to an io.EOF.
// Instead, we let it bubble up to the exported Encode/Decode method
// and recover it there as an io.EOF.
//
// see panicValToErr(...) function in helper.go.
type bytesDecReader struct {
b []byte // data
c uint // cursor
t uint // track start
// a int // available
}
func (z *bytesDecReader) reset(in []byte) {
z.b = in
// z.b = in
z.b = in[:len(in):len(in)] // so reslicing will not go past capacity
z.c = 0
z.t = 0
}
func (z *bytesDecReader) numread() uint {
return z.c
}
func (z *bytesDecReader) last() byte {
return z.b[z.c-1]
}
// func (z *bytesDecReader) last() byte {
// return z.b[z.c-1]
// }
func (z *bytesDecReader) unreadn1() {
if z.c == 0 || len(z.b) == 0 {
panic(errBytesDecReaderCannotUnread)
}
z.c--
}
// func (z *bytesDecReader) unreadn1() {
// // if z.c == 0 || len(z.b) == 0 {
// // panic(errBytesDecReaderCannotUnread)
// // }
// z.c--
// }
func (z *bytesDecReader) readx(n uint) (bs []byte) {
// slicing from a non-constant start position is more expensive,
@@ -658,8 +577,8 @@ func (z *bytesDecReader) readb(bs []byte) {
}
func (z *bytesDecReader) readn1() (v uint8) {
v = z.b[z.c]
z.c++
v = z.b[z.c] // cost 7
z.c++ // cost 4
return
}
@@ -684,34 +603,30 @@ LOOP:
return
}
func (z *bytesDecReader) skip(accept *bitset256) (token byte) {
func (z *bytesDecReader) jsonReadNum() (out []byte) {
z.c--
i := z.c
LOOP:
// if i < uint(len(z.b)) {
token = z.b[i]
i++
if accept.isset(token) {
if i < uint(len(z.b)) && isNumberChar(z.b[i]) {
i++
goto LOOP
}
z.c = i
return
}
func (z *bytesDecReader) readTo(accept *bitset256) (out []byte) {
i := z.c
LOOP:
if i < uint(len(z.b)) {
if accept.isset(z.b[i]) {
i++
goto LOOP
}
}
out = z.b[z.c:i]
z.c = i
return // z.b[c:i]
}
func (z *bytesDecReader) skipWhitespace() (token byte) {
LOOP:
token = z.b[z.c]
z.c++
if isWhitespaceChar(token) {
goto LOOP
}
return
}
func (z *bytesDecReader) readUntil(stop byte, includeLast bool) (out []byte) {
i := z.c
LOOP:
@@ -733,14 +648,6 @@ LOOP:
// panic(io.EOF)
}
func (z *bytesDecReader) track() {
z.t = z.c
}
func (z *bytesDecReader) stopTrack() (bs []byte) {
return z.b[z.t:z.c]
}
// --------------
type decRd struct {
@@ -760,8 +667,6 @@ type decRd struct {
bi *bufioDecReader
}
// numread, track and stopTrack are always inlined, as they just check int fields, etc.
// the if/else-if/else block is expensive to inline.
// Each node of this construct costs a lot and dominates the budget.
// Best to only do an if fast-path else block (so fast-path is inlined).
@@ -789,35 +694,6 @@ func (z *decRd) numread() uint {
return z.ri.numread()
}
}
func (z *decRd) stopTrack() []byte {
if z.bytes {
return z.rb.stopTrack()
} else if z.bufio {
return z.bi.stopTrack()
} else {
return z.ri.stopTrack()
}
}
func (z *decRd) track() {
if z.bytes {
z.rb.track()
} else if z.bufio {
z.bi.track()
} else {
z.ri.track()
}
}
func (z *decRd) unreadn1() {
if z.bytes {
z.rb.unreadn1()
} else if z.bufio {
z.bi.unreadn1()
} else {
z.ri.unreadn1() // not inlined
}
}
func (z *decRd) readn(num uint8) [rwNLen]byte {
if z.bytes {
@@ -849,33 +725,32 @@ func (z *decRd) readb(s []byte) {
}
}
func (z *decRd) readn1() uint8 {
func (z *decRd) readn1() (v uint8) {
if z.bytes {
return z.rb.readn1()
} else if z.bufio {
// MARKER: manually inline, else this function is not inlined.
// Keep in sync with bytesDecReader.readn1
// return z.rb.readn1()
v = z.rb.b[z.rb.c]
z.rb.c++
} else {
v = z.readn1IO()
}
return
}
func (z *decRd) readn1IO() uint8 {
if z.bufio {
return z.bi.readn1()
} else {
return z.ri.readn1()
}
return z.ri.readn1()
}
func (z *decRd) skip(accept *bitset256) (token byte) {
func (z *decRd) skipWhitespace() (token byte) {
if z.bytes {
return z.rb.skip(accept)
return z.rb.skipWhitespace()
} else if z.bufio {
return z.bi.skip(accept)
return z.bi.skipWhitespace()
} else {
return z.ri.skip(accept)
}
}
func (z *decRd) readTo(accept *bitset256) (out []byte) {
if z.bytes {
return z.rb.readTo(accept)
} else if z.bufio {
return z.bi.readTo(accept)
} else {
return z.ri.readTo(accept)
return z.ri.skipWhitespace()
}
}
@@ -889,129 +764,31 @@ func (z *decRd) readUntil(stop byte, includeLast bool) (out []byte) {
}
}
/*
func (z *decRd) track() {
func (z *decRd) jsonReadNum() (bs []byte) {
if z.bytes {
z.rb.track()
return z.rb.jsonReadNum()
} else if z.bufio {
return z.bi.jsonReadNum()
} else {
z.trackIO()
}
}
func (z *decRd) trackIO() {
if z.bufio {
z.bi.track()
} else {
z.ri.track()
return z.ri.jsonReadNum()
}
}
func (z *decRd) unreadn1() {
if z.bytes {
z.rb.unreadn1()
} else {
z.unreadn1IO()
func readFull(r io.Reader, bs []byte) (n uint, err error) {
var nn int
for n < uint(len(bs)) && err == nil {
nn, err = r.Read(bs[n:])
if nn > 0 {
if err == io.EOF {
// leave EOF for next time
err = nil
}
n += uint(nn)
}
}
// do not do this - it serves no purpose
// if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF }
return
}
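
readFull above keeps calling Read until the buffer is full, deliberately holding back a trailing io.EOF for the next call. The standard library's io.ReadFull has the same looping behaviour for the fully-read case, which a one-byte-at-a-time reader makes visible:

package main

import (
	"bytes"
	"fmt"
	"io"
	"testing/iotest"
)

func main() {
	// OneByteReader forces short reads, so filling the buffer needs several Read calls
	r := iotest.OneByteReader(bytes.NewReader([]byte("hello")))
	buf := make([]byte, 5)
	n, err := io.ReadFull(r, buf)
	fmt.Println(n, err, string(buf)) // 5 <nil> hello
}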
func (z *decRd) unreadn1IO() {
if z.bufio {
z.bi.unreadn1()
} else {
z.ri.unreadn1()
}
}
func (z *decRd) readn(num uint8) [rwNLen]byte {
if z.bytes {
return z.rb.readn(num)
}
return z.readnIO(num)
}
func (z *decRd) readnIO(num uint8) [rwNLen]byte {
if z.bufio {
return z.bi.readn(num)
}
return z.ri.readn(num)
}
func (z *decRd) readx(n uint) []byte {
if z.bytes {
return z.rb.readx(n)
}
return z.readxIO(n)
}
func (z *decRd) readxIO(n uint) []byte {
if z.bufio {
return z.bi.readx(n)
}
return z.ri.readx(n)
}
func (z *decRd) readb(s []byte) {
if z.bytes {
z.rb.readb(s)
} else {
z.readbIO(s)
}
}
func (z *decRd) readbIO(s []byte) {
if z.bufio {
z.bi.readb(s)
} else {
z.ri.readb(s)
}
}
func (z *decRd) readn1() uint8 {
if z.bytes {
return z.rb.readn1()
}
return z.readn1IO()
}
func (z *decRd) readn1IO() uint8 {
if z.bufio {
return z.bi.readn1()
}
return z.ri.readn1()
}
func (z *decRd) skip(accept *bitset256) (token byte) {
if z.bytes {
return z.rb.skip(accept)
}
return z.skipIO(accept)
}
func (z *decRd) skipIO(accept *bitset256) (token byte) {
if z.bufio {
return z.bi.skip(accept)
}
return z.ri.skip(accept)
}
func (z *decRd) readTo(accept *bitset256) (out []byte) {
if z.bytes {
return z.rb.readTo(accept)
}
return z.readToIO(accept)
}
func (z *decRd) readToIO(accept *bitset256) (out []byte) {
if z.bufio {
return z.bi.readTo(accept)
}
return z.ri.readTo(accept)
}
func (z *decRd) readUntil(stop byte, includeLast bool) (out []byte) {
if z.bytes {
return z.rb.readUntil(stop, includeLast)
}
return z.readUntilIO(stop, includeLast)
}
func (z *decRd) readUntilIO(stop byte, includeLast bool) (out []byte) {
if z.bufio {
return z.bi.readUntil(stop, includeLast)
}
return z.ri.readUntil(stop, includeLast)
}
*/
var _ decReader = (*decRd)(nil)

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -10,7 +10,11 @@ import (
"net/rpc"
)
var errRpcJsonNeedsTermWhitespace = errors.New("rpc requires JsonHandle with TermWhitespace=true")
var (
errRpcJsonNeedsTermWhitespace = errors.New("rpc - requires JsonHandle with TermWhitespace=true")
errRpcIsClosed = errors.New("rpc - connection has been closed")
errRpcNoConn = errors.New("rpc - no connection")
)
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
@@ -51,14 +55,14 @@ func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
// defensive: ensure that jsonH has TermWhitespace turned on.
if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
panic(errRpcJsonNeedsTermWhitespace)
jsonH, ok := h.(*JsonHandle)
if ok && !jsonH.TermWhitespace {
halt.onerror(errRpcJsonNeedsTermWhitespace)
}
// always ensure that we use a flusher, and always flush what was written to the connection.
// we lose nothing by using a buffered writer internally.
f, ok := w.(ioFlusher)
var f ioFlusher
bh := basicHandle(h)
if !bh.RPCNoBuffer {
f, ok = w.(ioFlusher)
if bh.WriterBufferSize <= 0 {
if !ok {
bw := bufio.NewWriter(w)
@@ -66,11 +70,8 @@ func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
}
}
if bh.ReaderBufferSize <= 0 {
if _, ok = w.(ioPeeker); !ok {
if _, ok = w.(ioBuffered); !ok {
br := bufio.NewReader(r)
r = br
}
if _, ok = w.(ioBuffered); !ok {
r = bufio.NewReader(r)
}
}
}
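
newRPCCodec2 wraps the writer in a bufio.Writer whenever it does not already expose a Flush method, so write() can always flush after an Encode. A standalone sketch of that wrap-if-needed step, with the ioFlusher shape assumed from its use above:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// ioFlusher is assumed here to be the "has Flush" shape the rpc codec checks for.
type ioFlusher interface{ Flush() error }

// ensureFlusher returns a writer that is guaranteed to have a usable Flush.
func ensureFlusher(w io.Writer) (io.Writer, ioFlusher) {
	if f, ok := w.(ioFlusher); ok {
		return w, f
	}
	bw := bufio.NewWriter(w) // wrap: we lose nothing by buffering internally
	return bw, bw
}

func main() {
	var conn bytes.Buffer
	w, f := ensureFlusher(&conn)
	io.WriteString(w, "hello")
	fmt.Println(conn.Len()) // 0: still sitting in the bufio buffer
	f.Flush()
	fmt.Println(conn.Len()) // 5: flushed to the "connection"
}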
@@ -86,62 +87,62 @@ func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
}
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) {
if c.c != nil {
cls := c.cls.load()
if cls.closed {
return cls.errClosed
}
if err = c.ready(); err != nil {
return
}
err = c.enc.Encode(obj1)
if err == nil {
if writeObj2 {
err = c.enc.Encode(obj2)
}
if err == nil && writeObj2 {
err = c.enc.Encode(obj2)
}
if c.f != nil {
if err == nil {
err = c.f.Flush()
} else {
_ = c.f.Flush() // swallow flush error, so we maintain prior error on write
if err2 := c.f.Flush(); err == nil {
// ignore flush error if prior error occurred during Encode
err = err2
}
}
return
}
func (c *rpcCodec) swallow(err *error) {
defer panicToErr(c.dec, err)
c.dec.swallow()
}
func (c *rpcCodec) read(obj interface{}) (err error) {
if c.c != nil {
cls := c.cls.load()
if cls.closed {
return cls.errClosed
}
if err = c.ready(); err != nil {
return
}
// If nil is passed in, we should read and discard
if obj == nil {
// var obj2 interface{}
// return c.dec.Decode(&obj2)
c.swallow(&err)
defer panicToErr(c.dec, &err)
c.dec.swallow()
return
}
return c.dec.Decode(obj)
}
func (c *rpcCodec) Close() error {
func (c *rpcCodec) Close() (err error) {
if c.c != nil {
cls := c.cls.load()
if !cls.closed {
cls.errClosed = c.c.Close()
cls.closed = true
c.cls.store(cls)
}
err = cls.errClosed
}
return
}
func (c *rpcCodec) ready() (err error) {
if c.c == nil {
return nil
err = errRpcNoConn
} else {
cls := c.cls.load()
if cls.closed {
if err = cls.errClosed; err == nil {
err = errRpcIsClosed
}
}
}
cls := c.cls.load()
if cls.closed {
return cls.errClosed
}
cls.errClosed = c.c.Close()
cls.closed = true
c.cls.store(cls)
return cls.errClosed
return
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -30,6 +30,33 @@ const (
simpleVdExt = 248
)
var simpledescNames = map[byte]string{
simpleVdNil: "null",
simpleVdFalse: "false",
simpleVdTrue: "true",
simpleVdFloat32: "float32",
simpleVdFloat64: "float64",
simpleVdPosInt: "+int",
simpleVdNegInt: "-int",
simpleVdTime: "time",
simpleVdString: "string",
simpleVdByteArray: "binary",
simpleVdArray: "array",
simpleVdMap: "map",
simpleVdExt: "ext",
}
func simpledesc(bd byte) (s string) {
s = simpledescNames[bd]
if s == "" {
s = "unknown"
}
return
}
type simpleEncDriver struct {
noBuiltInTypes
encDriverNoopContainerWriter
@@ -194,7 +221,7 @@ func (e *simpleEncDriver) EncodeTime(t time.Time) {
}
v, err := t.MarshalBinary()
if err != nil {
e.e.errorv(err)
e.e.onerror(err)
return
}
// time.Time marshalbinary takes about 14 bytes.
@@ -208,7 +235,7 @@ type simpleDecDriver struct {
h *SimpleHandle
bdRead bool
bd byte
fnil bool
_ bool
noBuiltInTypes
decDriverNoopContainerReader
_ [6]uint64 // padding
@@ -224,39 +251,24 @@ func (d *simpleDecDriver) readNextBd() {
d.bdRead = true
}
func (d *simpleDecDriver) uncacheRead() {
if d.bdRead {
d.d.decRd.unreadn1()
d.bdRead = false
}
}
func (d *simpleDecDriver) advanceNil() (null bool) {
d.fnil = false
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
d.fnil = true
null = true
}
return
}
func (d *simpleDecDriver) Nil() bool {
return d.fnil
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
d.fnil = false
switch d.bd {
case simpleVdNil:
d.bdRead = false
d.fnil = true
return valueTypeNil
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
@@ -302,7 +314,6 @@ func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
neg = true
default:
d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
return
}
// DO NOT do this check below, because callers may only want the unsigned value:
//
@@ -333,7 +344,6 @@ func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("assigning negative signed value to unsigned type")
return
}
d.bdRead = false
return
@@ -352,7 +362,6 @@ func (d *simpleDecDriver) DecodeFloat64() (f float64) {
f = float64(d.DecodeInt64())
} else {
d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd)
return
}
}
d.bdRead = false
@@ -369,7 +378,6 @@ func (d *simpleDecDriver) DecodeBool() (b bool) {
b = true
} else {
d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
@@ -377,7 +385,7 @@ func (d *simpleDecDriver) DecodeBool() (b bool) {
func (d *simpleDecDriver) ReadMapStart() (length int) {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
d.bdRead = false
return d.decLen()
@@ -385,12 +393,19 @@ func (d *simpleDecDriver) ReadMapStart() (length int) {
func (d *simpleDecDriver) ReadArrayStart() (length int) {
if d.advanceNil() {
return decContainerLenNil
return containerLenNil
}
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) uint2Len(ui uint64) int {
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
}
return int(ui)
}
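uint2Len centralizes the overflow guard that decLen's removed branches below used to duplicate. A standalone sketch of the same check, assuming only the standard library (chkOvf and intBitsize are internal to this package):

package lensketch

import "fmt"

// uintToLen mirrors the role of uint2Len/chkOvf.Uint: convert a decoded
// big-endian length to int, rejecting values that cannot fit in a signed int.
func uintToLen(ui uint64) (int, error) {
	const intBits = 32 << (^uint(0) >> 63) // 32 or 64 depending on the platform
	if ui >= 1<<(intBits-1) {
		return 0, fmt.Errorf("overflow integer: %v", ui)
	}
	return int(ui), nil
}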
func (d *simpleDecDriver) decLen() int {
switch d.bd % 8 {
case 0:
@@ -400,19 +415,9 @@ func (d *simpleDecDriver) decLen() int {
case 2:
return int(bigen.Uint16(d.d.decRd.readx(2)))
case 3:
ui := uint64(bigen.Uint32(d.d.decRd.readx(4)))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
return 0
}
return int(ui)
return d.uint2Len(uint64(bigen.Uint32(d.d.decRd.readx(4))))
case 4:
ui := bigen.Uint64(d.d.decRd.readx(8))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
return 0
}
return int(ui)
return d.uint2Len(bigen.Uint64(d.d.decRd.readx(8)))
}
d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
@@ -442,12 +447,11 @@ func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.d.bytes {
return d.d.decRd.readx(uint(clen))
} else if len(bs) == 0 {
bs = d.d.b[:]
}
if d.d.bytes && (zerocopy || d.h.ZeroCopy) {
return d.d.decRd.rb.readx(uint(clen))
}
if zerocopy && len(bs) == 0 {
bs = d.d.b[:]
}
return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs)
}
@@ -458,21 +462,17 @@ func (d *simpleDecDriver) DecodeTime() (t time.Time) {
}
if d.bd != simpleVdTime {
d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
return
}
d.bdRead = false
clen := int(d.d.decRd.readn1())
b := d.d.decRd.readx(uint(clen))
if err := (&t).UnmarshalBinary(b); err != nil {
d.d.errorv(err)
}
clen := uint(d.d.decRd.readn1())
b := d.d.decRd.readx(clen)
d.d.onerror((&t).UnmarshalBinary(b))
return
}
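DecodeTime now hands UnmarshalBinary's error straight to d.d.onerror. The payload is time.Time's own binary form (about 14 bytes, per the comment earlier in this diff), written as one length byte followed by the marshalled bytes. A minimal round-trip sketch using only the standard library:

package timesketch

import "time"

// roundTripTime mirrors what EncodeTime/DecodeTime do around the descriptor
// and length byte: marshal with MarshalBinary, restore with UnmarshalBinary.
func roundTripTime(t time.Time) (time.Time, error) {
	b, err := t.MarshalBinary()
	if err != nil {
		return time.Time{}, err
	}
	var out time.Time
	if err := out.UnmarshalBinary(b); err != nil {
		return time.Time{}, err
	}
	return out, nil
}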
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) {
if xtag > 0xff {
d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
return
}
if d.advanceNil() {
return
@@ -497,10 +497,9 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
xtag = d.d.decRd.readn1()
if verifyTag && xtag != tag {
d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
if d.d.bytes {
xbs = d.d.decRd.readx(uint(l))
xbs = d.d.decRd.rb.readx(uint(l))
} else {
xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
}
@@ -509,7 +508,6 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
xbs = d.DecodeBytes(nil, true)
default:
d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
@@ -520,14 +518,12 @@ func (d *simpleDecDriver) DecodeNaked() {
d.readNextBd()
}
d.fnil = false
n := d.d.naked()
var decodeFurther bool
switch d.bd {
case simpleVdNil:
n.v = valueTypeNil
d.fnil = true
case simpleVdFalse:
n.v = valueTypeBool
n.b = false
@@ -560,13 +556,13 @@ func (d *simpleDecDriver) DecodeNaked() {
n.s = string(d.DecodeStringAsBytes())
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
decNakedReadRawBytes(d, &d.d, n, d.h.RawToString)
fauxUnionReadRawBytes(d, &d.d, n, d.h.RawToString)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.d.decRd.readn1())
if d.d.bytes {
n.l = d.d.decRd.readx(uint(l))
n.l = d.d.decRd.rb.readx(uint(l))
} else {
n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
}
@@ -586,6 +582,102 @@ func (d *simpleDecDriver) DecodeNaked() {
}
}
func (d *simpleDecDriver) nextValueBytes(start []byte) (v []byte) {
if !d.bdRead {
d.readNextBd()
}
v = append(start, d.bd)
v = d.nextValueBytesBdReadR(v)
d.bdRead = false
return
}
func (d *simpleDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
d.readNextBd()
v = append(v0, d.bd)
return d.nextValueBytesBdReadR(v)
}
func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
v = v0
c := d.bd
var x []byte
var length uint
switch c {
case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray:
// pass
case simpleVdPosInt, simpleVdNegInt:
v = append(v, d.d.decRd.readn1())
case simpleVdPosInt + 1, simpleVdNegInt + 1:
v = append(v, d.d.decRd.readx(2)...)
case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32:
v = append(v, d.d.decRd.readx(4)...)
case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64:
v = append(v, d.d.decRd.readx(8)...)
case simpleVdTime:
c = d.d.decRd.readn1()
v = append(v, c)
v = append(v, d.d.decRd.readx(uint(c))...)
default:
switch c % 8 {
case 0:
x = nil
length = 0
case 1:
x = d.d.decRd.readx(1)
length = uint(x[0])
case 2:
x = d.d.decRd.readx(2)
length = uint(bigen.Uint16(x))
case 3:
x = d.d.decRd.readx(4)
length = uint(bigen.Uint32(x))
case 4:
x = d.d.decRd.readx(8)
length = uint(bigen.Uint64(x))
}
if len(x) > 0 {
v = append(v, x...)
}
bExt := c >= simpleVdExt && c <= simpleVdExt+7
bStr := c >= simpleVdString && c <= simpleVdString+7
bByteArray := c >= simpleVdByteArray && c <= simpleVdByteArray+7
bArray := c >= simpleVdArray && c <= simpleVdArray+7
bMap := c >= simpleVdMap && c <= simpleVdMap+7
if !(bExt || bStr || bByteArray || bArray || bMap) {
d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
}
if bExt {
v = append(v, d.d.decRd.readn1()) // tag
}
if length == 0 {
break
}
if bArray {
for i := uint(0); i < length; i++ {
v = d.nextValueBytesR(v)
}
} else if bMap {
for i := uint(0); i < length; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
}
} else {
v = append(v, d.d.decRd.readx(length)...)
}
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
@@ -619,6 +711,8 @@ type SimpleHandle struct {
// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }
func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) }
func (h *SimpleHandle) newEncDriver() encDriver {
var e = &simpleEncDriver{h: h}
e.e.e = e
@@ -640,7 +734,6 @@ func (e *simpleEncDriver) reset() {
func (d *simpleDecDriver) reset() {
d.bd, d.bdRead = 0, false
d.fnil = false
}
var _ decDriver = (*simpleDecDriver)(nil)


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
@@ -17,12 +17,12 @@ func (p stringSlice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type float64Slice []float64
type uint8Slice []uint8
func (p float64Slice) Len() int { return len(p) }
func (p float64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p float64Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)] || isNaN64(p[uint(i)]) && !isNaN64(p[uint(j)])
func (p uint8Slice) Len() int { return len(p) }
func (p uint8Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint8Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type uint64Slice []uint64
@@ -33,11 +33,11 @@ func (p uint64Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type uintptrSlice []uintptr
type intSlice []int
func (p uintptrSlice) Len() int { return len(p) }
func (p uintptrSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uintptrSlice) Less(i, j int) bool {
func (p intSlice) Len() int { return len(p) }
func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p intSlice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
@@ -49,30 +49,6 @@ func (p int64Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
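The remaining generated types all repeat one sort.Interface shape. A self-contained sketch of that shape for a plain int slice (the type name is invented; the uint(i) indexing follows the generator's convention, presumably to help bounds-check elimination):

package sortsketch

import "sort"

type intSliceExample []int

func (p intSliceExample) Len() int           { return len(p) }
func (p intSliceExample) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p intSliceExample) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] }

// sortInts sorts xs in place using the pattern above.
func sortInts(xs []int) { sort.Sort(intSliceExample(xs)) }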
type boolSlice []bool
func (p boolSlice) Len() int { return len(p) }
func (p boolSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p boolSlice) Less(i, j int) bool {
return !p[uint(i)] && p[uint(j)]
}
type timeSlice []time.Time
func (p timeSlice) Len() int { return len(p) }
func (p timeSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p timeSlice) Less(i, j int) bool {
return p[uint(i)].Before(p[uint(j)])
}
type bytesSlice [][]byte
func (p bytesSlice) Len() int { return len(p) }
func (p bytesSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p bytesSlice) Less(i, j int) bool {
return bytes.Compare(p[uint(i)], p[uint(j)]) == -1
}
type stringRv struct {
v string
r reflect.Value
@@ -85,18 +61,6 @@ func (p stringRvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type stringIntf struct {
v string
i interface{}
}
type stringIntfSlice []stringIntf
func (p stringIntfSlice) Len() int { return len(p) }
func (p stringIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringIntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type float64Rv struct {
v float64
r reflect.Value
@@ -109,18 +73,6 @@ func (p float64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v || isNaN64(p[uint(i)].v) && !isNaN64(p[uint(j)].v)
}
type float64Intf struct {
v float64
i interface{}
}
type float64IntfSlice []float64Intf
func (p float64IntfSlice) Len() int { return len(p) }
func (p float64IntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p float64IntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v || isNaN64(p[uint(i)].v) && !isNaN64(p[uint(j)].v)
}
type uint64Rv struct {
v uint64
r reflect.Value
@@ -133,42 +85,6 @@ func (p uint64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type uint64Intf struct {
v uint64
i interface{}
}
type uint64IntfSlice []uint64Intf
func (p uint64IntfSlice) Len() int { return len(p) }
func (p uint64IntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint64IntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type uintptrRv struct {
v uintptr
r reflect.Value
}
type uintptrRvSlice []uintptrRv
func (p uintptrRvSlice) Len() int { return len(p) }
func (p uintptrRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uintptrRvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type uintptrIntf struct {
v uintptr
i interface{}
}
type uintptrIntfSlice []uintptrIntf
func (p uintptrIntfSlice) Len() int { return len(p) }
func (p uintptrIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uintptrIntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type int64Rv struct {
v int64
r reflect.Value
@@ -181,18 +97,6 @@ func (p int64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type int64Intf struct {
v int64
i interface{}
}
type int64IntfSlice []int64Intf
func (p int64IntfSlice) Len() int { return len(p) }
func (p int64IntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p int64IntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type boolRv struct {
v bool
r reflect.Value
@@ -205,18 +109,6 @@ func (p boolRvSlice) Less(i, j int) bool {
return !p[uint(i)].v && p[uint(j)].v
}
type boolIntf struct {
v bool
i interface{}
}
type boolIntfSlice []boolIntf
func (p boolIntfSlice) Len() int { return len(p) }
func (p boolIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p boolIntfSlice) Less(i, j int) bool {
return !p[uint(i)].v && p[uint(j)].v
}
type timeRv struct {
v time.Time
r reflect.Value
@@ -229,18 +121,6 @@ func (p timeRvSlice) Less(i, j int) bool {
return p[uint(i)].v.Before(p[uint(j)].v)
}
type timeIntf struct {
v time.Time
i interface{}
}
type timeIntfSlice []timeIntf
func (p timeIntfSlice) Len() int { return len(p) }
func (p timeIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p timeIntfSlice) Less(i, j int) bool {
return p[uint(i)].v.Before(p[uint(j)].v)
}
type bytesRv struct {
v []byte
r reflect.Value


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
@@ -54,11 +54,13 @@ type {{ $v }}Rv struct {
type {{ $v }}RvSlice []{{ $v }}Rv
{{template "T" args "Kind" $v "Type" (print $v "RvSlice") "V" ".v"}}
{{if eq $v "bytes" -}}
type {{ $v }}Intf struct {
v {{ $t }}
i interface{}
}
type {{ $v }}IntfSlice []{{ $v }}Intf
{{template "T" args "Kind" $v "Type" (print $v "IntfSlice") "V" ".v"}}
{{end}}
{{end}}


@@ -11,8 +11,11 @@
# Ensure all "string" keys are utf strings (else encoded as bytes)
from __future__ import print_function
import cbor, msgpack, msgpackrpc, sys, os, threading
mylocaladdr="127.0.0.1" # localhost.localdomain localhost 127.0.0.1
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
@@ -69,13 +72,11 @@ def build_test_data(destdir):
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
f.write(serialized)
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
f.write(serialized)
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
@@ -84,7 +85,7 @@ def doRpcServer(port, stopTimeSec):
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('127.0.0.1', port)
addr = msgpackrpc.Address(mylocaladdr, port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
@@ -96,17 +97,17 @@ def doRpcServer(port, stopTimeSec):
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('127.0.0.1', port)
address = msgpackrpc.Address(mylocaladdr, port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
print(client.call("Echo123", "A1", "B2", "C3"))
print(client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('127.0.0.1', port)
# print(">>>> port: ", port, " <<<<<")
address = msgpackrpc.Address(mylocaladdr, port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]))
print(client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
@@ -123,4 +124,3 @@ def doMain(args):
if __name__ == "__main__":
doMain(sys.argv[1:])


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
@@ -11,9 +11,11 @@ type encWriter interface {
writestr(string)
writeqstr(string) // write string wrapped in quotes ie "..."
writen1(byte)
// add convenience functions for writing 2,4
writen2(byte, byte)
// writen will write up to 7 bytes at a time.
writen(b [rwNLen]byte, num uint8)
writen4(byte, byte, byte, byte)
end()
}
@@ -53,23 +55,22 @@ func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
z.buf = z.buf[:cap(z.buf)]
}
//go:noinline - flush only called intermittently
func (z *bufioEncWriter) flushErr() (err error) {
n, err := z.w.Write(z.buf[:z.n])
z.n -= n
if z.n > 0 && err == nil {
err = io.ErrShortWrite
}
if n > 0 && z.n > 0 {
copy(z.buf, z.buf[n:z.n+n])
if z.n > 0 {
if err == nil {
err = io.ErrShortWrite
}
if n > 0 {
copy(z.buf, z.buf[n:z.n+n])
}
}
return err
}
func (z *bufioEncWriter) flush() {
if err := z.flushErr(); err != nil {
panic(err)
}
halt.onerror(z.flushErr())
}
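The reworked flushErr treats any bytes left after a partial Write as io.ErrShortWrite and shifts the unwritten tail back to the start of the buffer. A standalone sketch of that pattern over a plain io.Writer:

package flushsketch

import "io"

// flushBuffer writes buf[:n] to w. If anything is left unwritten it reports
// io.ErrShortWrite (unless Write already returned an error) and moves the
// unwritten tail to the front of buf, returning the new buffered count.
func flushBuffer(w io.Writer, buf []byte, n int) (int, error) {
	written, err := w.Write(buf[:n])
	n -= written
	if n > 0 {
		if err == nil {
			err = io.ErrShortWrite
		}
		if written > 0 {
			copy(buf, buf[written:written+n])
		}
	}
	return n, err
}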
func (z *bufioEncWriter) writeb(s []byte) {
@@ -127,7 +128,6 @@ func (z *bufioEncWriter) writen1(b1 byte) {
z.buf[z.n] = b1
z.n++
}
func (z *bufioEncWriter) writen2(b1, b2 byte) {
if 2 > len(z.buf)-z.n {
z.flush()
@@ -136,13 +136,15 @@ func (z *bufioEncWriter) writen2(b1, b2 byte) {
z.buf[z.n] = b1
z.n += 2
}
func (z *bufioEncWriter) writen(b [rwNLen]byte, num uint8) {
if int(num) > len(z.buf)-z.n {
func (z *bufioEncWriter) writen4(b1, b2, b3, b4 byte) {
if 4 > len(z.buf)-z.n {
z.flush()
}
copy(z.buf[z.n:], b[:num])
z.n += int(num)
z.buf[z.n+3] = b4
z.buf[z.n+2] = b3
z.buf[z.n+1] = b2
z.buf[z.n] = b1
z.n += 4
}
func (z *bufioEncWriter) endErr() (err error) {
@@ -168,7 +170,6 @@ func (z *bytesEncAppender) writestr(s string) {
}
func (z *bytesEncAppender) writeqstr(s string) {
z.b = append(append(append(z.b, '"'), s...), '"')
// z.b = append(z.b, '"')
// z.b = append(z.b, s...)
// z.b = append(z.b, '"')
@@ -177,13 +178,10 @@ func (z *bytesEncAppender) writen1(b1 byte) {
z.b = append(z.b, b1)
}
func (z *bytesEncAppender) writen2(b1, b2 byte) {
z.b = append(z.b, b1, b2) // cost: 81
z.b = append(z.b, b1, b2)
}
func (z *bytesEncAppender) writen(s [rwNLen]byte, num uint8) {
// if num <= rwNLen {
if int(num) <= len(s) {
z.b = append(z.b, s[:num]...)
}
func (z *bytesEncAppender) writen4(b1, b2, b3, b4 byte) {
z.b = append(z.b, b1, b2, b3, b4)
}
func (z *bytesEncAppender) endErr() error {
*(z.out) = z.b
@@ -209,6 +207,11 @@ type encWr struct {
wf *bufioEncWriter
}
// MARKER: manually inline bytesEncAppender.writenx/writeqstr methods,
// as calling them causes encWr.writenx/writeqstr methods to not be inlined (cost > 80).
//
// i.e. e.g. instead of writing z.wb.writen2(b1, b2), use z.wb.b = append(z.wb.b, b1, b2)
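To illustrate the MARKER note: the bytes-mode branches below append to z.wb.b directly because calling the bytesEncAppender method would push the wrapper method past the Go inliner's cost budget. A hedged, self-contained illustration of that trade-off (names invented for the sketch):

package inlinesketch

// appender stands in for bytesEncAppender: an append-backed byte sink.
type appender struct{ b []byte }

func (a *appender) writen2(b1, b2 byte) { a.b = append(a.b, b1, b2) }

// wrapper stands in for encWr, which picks between a bytes sink and an io sink.
type wrapper struct {
	bytes bool
	wb    appender
}

// writen2 performs the append inline; routing through wb.writen2 would add
// call cost that can stop the compiler from inlining this method at call sites.
func (w *wrapper) writen2(b1, b2 byte) {
	if w.bytes {
		w.wb.b = append(w.wb.b, b1, b2) // manual inline of appender.writen2
	}
	// the io-backed branch is elided in this sketch
}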
func (z *encWr) writeb(s []byte) {
if z.bytes {
z.wb.writeb(s)
@@ -218,7 +221,8 @@ func (z *encWr) writeb(s []byte) {
}
func (z *encWr) writeqstr(s string) {
if z.bytes {
z.wb.writeqstr(s)
// MARKER: z.wb.writeqstr(s)
z.wb.b = append(append(append(z.wb.b, '"'), s...), '"')
} else {
z.wf.writeqstr(s)
}
@@ -237,20 +241,24 @@ func (z *encWr) writen1(b1 byte) {
z.wf.writen1(b1)
}
}
func (z *encWr) writen2(b1, b2 byte) {
if z.bytes {
z.wb.writen2(b1, b2)
// MARKER: z.wb.writen2(b1, b2)
z.wb.b = append(z.wb.b, b1, b2)
} else {
z.wf.writen2(b1, b2)
}
}
func (z *encWr) writen(b [rwNLen]byte, num uint8) {
func (z *encWr) writen4(b1, b2, b3, b4 byte) {
if z.bytes {
z.wb.writen(b, num)
// MARKER: z.wb.writen4(b1, b2, b3, b4)
z.wb.b = append(z.wb.b, b1, b2, b3, b4)
} else {
z.wf.writen(b, num)
z.wf.writen4(b1, b2, b3, b4)
}
}
func (z *encWr) endErr() error {
if z.bytes {
return z.wb.endErr()
@@ -259,9 +267,7 @@ func (z *encWr) endErr() error {
}
func (z *encWr) end() {
if err := z.endErr(); err != nil {
panic(err)
}
halt.onerror(z.endErr())
}
var _ encWriter = (*encWr)(nil)

Some files were not shown because too many files have changed in this diff.