summaryrefslogtreecommitdiff
path: root/vendor/github.com/tdewolff
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/tdewolff')
-rw-r--r--vendor/github.com/tdewolff/minify/v2/.gitattributes2
-rw-r--r--vendor/github.com/tdewolff/minify/v2/.gitignore30
-rw-r--r--vendor/github.com/tdewolff/minify/v2/.golangci.yml16
-rw-r--r--vendor/github.com/tdewolff/minify/v2/Dockerfile17
-rw-r--r--vendor/github.com/tdewolff/minify/v2/LICENSE22
-rw-r--r--vendor/github.com/tdewolff/minify/v2/Makefile58
-rw-r--r--vendor/github.com/tdewolff/minify/v2/README.md735
-rw-r--r--vendor/github.com/tdewolff/minify/v2/common.go524
-rw-r--r--vendor/github.com/tdewolff/minify/v2/css/css.go1549
-rw-r--r--vendor/github.com/tdewolff/minify/v2/css/hash.go1392
-rw-r--r--vendor/github.com/tdewolff/minify/v2/css/table.go198
-rw-r--r--vendor/github.com/tdewolff/minify/v2/css/util.go55
-rw-r--r--vendor/github.com/tdewolff/minify/v2/html/buffer.go137
-rw-r--r--vendor/github.com/tdewolff/minify/v2/html/hash.go543
-rw-r--r--vendor/github.com/tdewolff/minify/v2/html/html.go514
-rw-r--r--vendor/github.com/tdewolff/minify/v2/html/table.go1346
-rw-r--r--vendor/github.com/tdewolff/minify/v2/js/js.go1277
-rw-r--r--vendor/github.com/tdewolff/minify/v2/js/stmtlist.go341
-rw-r--r--vendor/github.com/tdewolff/minify/v2/js/util.go1361
-rw-r--r--vendor/github.com/tdewolff/minify/v2/js/vars.go443
-rw-r--r--vendor/github.com/tdewolff/minify/v2/minify.go371
-rw-r--r--vendor/github.com/tdewolff/parse/v2/.gitattributes1
-rw-r--r--vendor/github.com/tdewolff/parse/v2/.gitignore5
-rw-r--r--vendor/github.com/tdewolff/parse/v2/.golangci.yml16
-rw-r--r--vendor/github.com/tdewolff/parse/v2/LICENSE.md22
-rw-r--r--vendor/github.com/tdewolff/parse/v2/README.md64
-rw-r--r--vendor/github.com/tdewolff/parse/v2/buffer/buffer.go12
-rw-r--r--vendor/github.com/tdewolff/parse/v2/buffer/lexer.go164
-rw-r--r--vendor/github.com/tdewolff/parse/v2/buffer/reader.go44
-rw-r--r--vendor/github.com/tdewolff/parse/v2/buffer/streamlexer.go223
-rw-r--r--vendor/github.com/tdewolff/parse/v2/buffer/writer.go65
-rw-r--r--vendor/github.com/tdewolff/parse/v2/common.go237
-rw-r--r--vendor/github.com/tdewolff/parse/v2/css/README.md170
-rw-r--r--vendor/github.com/tdewolff/parse/v2/css/hash.go75
-rw-r--r--vendor/github.com/tdewolff/parse/v2/css/lex.go698
-rw-r--r--vendor/github.com/tdewolff/parse/v2/css/parse.go462
-rw-r--r--vendor/github.com/tdewolff/parse/v2/css/util.go47
-rw-r--r--vendor/github.com/tdewolff/parse/v2/error.go47
-rw-r--r--vendor/github.com/tdewolff/parse/v2/html/README.md98
-rw-r--r--vendor/github.com/tdewolff/parse/v2/html/hash.go81
-rw-r--r--vendor/github.com/tdewolff/parse/v2/html/lex.go494
-rw-r--r--vendor/github.com/tdewolff/parse/v2/html/util.go113
-rw-r--r--vendor/github.com/tdewolff/parse/v2/input.go173
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/README.md80
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/ast.go3884
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/lex.go793
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/parse.go2292
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/table.go142
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/tokentype.go404
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/util.go38
-rw-r--r--vendor/github.com/tdewolff/parse/v2/js/walk.go288
-rw-r--r--vendor/github.com/tdewolff/parse/v2/position.go95
-rw-r--r--vendor/github.com/tdewolff/parse/v2/strconv/float.go257
-rw-r--r--vendor/github.com/tdewolff/parse/v2/strconv/int.go108
-rw-r--r--vendor/github.com/tdewolff/parse/v2/strconv/price.go83
-rw-r--r--vendor/github.com/tdewolff/parse/v2/util.go481
56 files changed, 23187 insertions, 0 deletions
diff --git a/vendor/github.com/tdewolff/minify/v2/.gitattributes b/vendor/github.com/tdewolff/minify/v2/.gitattributes
new file mode 100644
index 0000000..16a3a8b
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/.gitattributes
@@ -0,0 +1,2 @@
+benchmarks/sample_* linguist-generated
+tests/*/corpus/* linguist-generated
diff --git a/vendor/github.com/tdewolff/minify/v2/.gitignore b/vendor/github.com/tdewolff/minify/v2/.gitignore
new file mode 100644
index 0000000..c9ea38d
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/.gitignore
@@ -0,0 +1,30 @@
+release.sh
+dist
+benchmarks/*
+!benchmarks/*.go
+!benchmarks/sample_*
+tests/*/fuzz-fuzz.zip
+tests/*/crashers
+tests/*/suppressions
+tests/*/corpus/*
+!tests/*/corpus/*.*
+parse/tests/*/fuzz-fuzz.zip
+parse/tests/*/crashers
+parse/tests/*/suppressions
+parse/tests/*/corpus/*
+!parse/tests/*/corpus/*.*
+bindings/js/build
+bindings/js/prebuilds
+bindings/js/minify.h
+bindings/js/minify.a
+bindings/js/node_modules
+bindings/js/example/package-lock.json
+bindings/js/example/node_modules
+bindings/js/example/test.min.html
+bindings/py/go.mod
+bindings/py/go.sum
+bindings/py/minify.h
+bindings/py/minify.so
+bindings/py/tdewolff_minify.egg-info
+bindings/py/example/example.min.html
+bindings/py/dist
diff --git a/vendor/github.com/tdewolff/minify/v2/.golangci.yml b/vendor/github.com/tdewolff/minify/v2/.golangci.yml
new file mode 100644
index 0000000..7009f92
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/.golangci.yml
@@ -0,0 +1,16 @@
+linters:
+ enable:
+ - depguard
+ - dogsled
+ - gofmt
+ - goimports
+ - golint
+ - gosec
+ - govet
+ - megacheck
+ - misspell
+ - nakedret
+ - prealloc
+ - unconvert
+ - unparam
+ - wastedassign
diff --git a/vendor/github.com/tdewolff/minify/v2/Dockerfile b/vendor/github.com/tdewolff/minify/v2/Dockerfile
new file mode 100644
index 0000000..0f7fde4
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/Dockerfile
@@ -0,0 +1,17 @@
+# Use this image to build the executable
+FROM golang:1.18-alpine AS build
+
+WORKDIR /go/src/github.com/tdewolff/minify
+COPY . /go/src/github.com/tdewolff/minify/
+
+RUN apk add --no-cache git ca-certificates make bash
+RUN /usr/bin/env bash -c make install
+
+
+# Final image containing the executable from the previous step
+FROM alpine:3
+
+COPY --from=build /go/bin/minify /usr/bin/minify
+COPY "containerfiles/container-entrypoint.sh" "/init.sh"
+
+ENTRYPOINT ["/init.sh"]
diff --git a/vendor/github.com/tdewolff/minify/v2/LICENSE b/vendor/github.com/tdewolff/minify/v2/LICENSE
new file mode 100644
index 0000000..41677de
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015 Taco de Wolff
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/tdewolff/minify/v2/Makefile b/vendor/github.com/tdewolff/minify/v2/Makefile
new file mode 100644
index 0000000..9eede28
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/Makefile
@@ -0,0 +1,58 @@
+SHELL=/usr/bin/env bash
+NAME=minify
+CMD=./cmd/minify
+TARGETS=linux_amd64 linux_arm64 darwin_amd64 darwin_arm64 freebsd_amd64 netbsd_amd64 openbsd_amd64 windows_amd64
+VERSION=`git describe --tags`
+FLAGS=-ldflags "-s -w -X 'main.Version=${VERSION}'" -trimpath
+ENVS=GO111MODULES=on CGO_ENABLED=0
+
+all: install
+
+install:
+ echo "Installing ${VERSION}"
+ ${ENVS} go install ${FLAGS} ./cmd/minify
+ . cmd/minify/bash_completion
+
+release:
+ TAG=$(shell git describe --tags --exact-match 2> /dev/null);
+ if [ "${.SHELLSTATUS}" -eq 0 ]; then \
+ echo "Releasing ${VERSION}"; \
+ else \
+ echo "ERROR: commit is not tagged with a version"; \
+ echo ""; \
+ exit 1; \
+ fi
+ rm -rf dist
+ mkdir -p dist
+ for t in ${TARGETS}; do \
+ echo Building $$t...; \
+ mkdir dist/$$t; \
+ os=$$(echo $$t | cut -f1 -d_); \
+ arch=$$(echo $$t | cut -f2 -d_); \
+ ${ENVS} GOOS=$$os GOARCH=$$arch go build ${FLAGS} -o dist/$$t/${NAME} ${CMD}; \
+ \
+ cp LICENSE dist/$$t/.; \
+ cp cmd/minify/README.md dist/$$t/.; \
+ if [ "$$os" == "windows" ]; then \
+ mv dist/$$t/${NAME} dist/$$t/${NAME}.exe; \
+ zip -jq dist/${NAME}_$$t.zip dist/$$t/*; \
+ cd dist; \
+ sha256sum ${NAME}_$$t.zip >> checksums.txt; \
+ cd ..; \
+ else \
+ cp cmd/minify/bash_completion dist/$$t/.; \
+ cd dist/$$t; \
+ tar -cf - * | gzip -9 > ../${NAME}_$$t.tar.gz; \
+ cd ..; \
+ sha256sum ${NAME}_$$t.tar.gz >> checksums.txt; \
+ cd ..; \
+ fi; \
+ rm -rf dist/$$t; \
+ done
+
+clean:
+ echo "Cleaning dist/"
+ rm -rf dist
+
+.PHONY: install release clean
+.SILENT: install release clean
diff --git a/vendor/github.com/tdewolff/minify/v2/README.md b/vendor/github.com/tdewolff/minify/v2/README.md
new file mode 100644
index 0000000..a65ffee
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/README.md
@@ -0,0 +1,735 @@
+# Minify <a name="minify"></a> [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/minify/v2?tab=doc) [![Go Report Card](https://goreportcard.com/badge/github.com/tdewolff/minify)](https://goreportcard.com/report/github.com/tdewolff/minify) [![codecov](https://codecov.io/gh/tdewolff/minify/branch/master/graph/badge.svg?token=Cr7r2EKPj2)](https://codecov.io/gh/tdewolff/minify) [![Donate](https://img.shields.io/badge/patreon-donate-DFB317)](https://www.patreon.com/tdewolff)
+
+**[Online demo](https://go.tacodewolff.nl/minify)** if you need to minify files *now*.
+
+**[Binaries](https://github.com/tdewolff/minify/releases) of CLI for various platforms.** See [CLI](https://github.com/tdewolff/minify/tree/master/cmd/minify) for more installation instructions.
+
+**[Python bindings](https://pypi.org/project/tdewolff-minify/)** install with `pip install tdewolff-minify`
+
+**[JavaScript bindings](https://www.npmjs.com/package/@tdewolff/minify)** install with `npm i @tdewolff/minify`
+
+**[.NET bindings](https://github.com/JKamsker/NMinify)** install with `Install-Package NMinify` or `dotnet add package NMinify`, thanks to Jonas Kamsker for the port
+
+---
+
+*Did you know that the shortest valid piece of HTML5 is `<!doctype html><title>x</title>`? See for yourself at the [W3C Validator](http://validator.w3.org/)!*
+
+Minify is a minifier package written in [Go][1]. It provides HTML5, CSS3, JS, JSON, SVG and XML minifiers and an interface to implement any other minifier. Minification is the process of removing bytes from a file (such as whitespace) without changing its output and therefore shrinking its size and speeding up transmission over the internet and possibly parsing. The implemented minifiers are designed for high performance.
+
+The core functionality associates mimetypes with minification functions, allowing embedded resources (like CSS or JS within HTML files) to be minified as well. Users can add new implementations that are triggered based on a mimetype (or pattern), or redirect to an external command (like ClosureCompiler, UglifyCSS, ...).
+
+### Sponsors
+
+[![SiteGround](https://www.siteground.com/img/downloads/siteground-logo-black-transparent-vector.svg)](https://www.siteground.com/)
+
+Please see https://www.patreon.com/tdewolff for ways to contribute, otherwise please contact me directly!
+
+#### Table of Contents
+
+- [Minify](#minify)
+ - [Prologue](#prologue)
+ - [Installation](#installation)
+ - [API stability](#api-stability)
+ - [Testing](#testing)
+ - [Performance](#performance)
+ - [HTML](#html)
+ - [Whitespace removal](#whitespace-removal)
+ - [CSS](#css)
+ - [JS](#js)
+ - [Comparison with other tools](#comparison-with-other-tools)
+ - [Compression ratio (lower is better)](#compression-ratio-lower-is-better)
+ - [Time (lower is better)](#time-lower-is-better)
+ - [JSON](#json)
+ - [SVG](#svg)
+ - [XML](#xml)
+ - [Usage](#usage)
+ - [New](#new)
+ - [From reader](#from-reader)
+ - [From bytes](#from-bytes)
+ - [From string](#from-string)
+ - [To reader](#to-reader)
+ - [To writer](#to-writer)
+ - [Middleware](#middleware)
+ - [Custom minifier](#custom-minifier)
+ - [Mediatypes](#mediatypes)
+ - [Examples](#examples)
+ - [Common minifiers](#common-minifiers)
+ - [External minifiers](#external-minifiers)
+ - [Closure Compiler](#closure-compiler)
+ - [UglifyJS](#uglifyjs)
+ - [esbuild](#esbuild)
+ - [Custom minifier](#custom-minifier-example)
+ - [ResponseWriter](#responsewriter)
+ - [Templates](#templates)
+ - [FAQ](#faq)
+ - [License](#license)
+
+### Roadmap
+
+- [ ] Use ASM/SSE to further speed-up core parts of the parsers/minifiers
+- [x] Improve JS minifiers by shortening variables and proper semicolon omission
+- [ ] Speed-up SVG minifier, it is very slow
+- [x] Proper parser error reporting and line number + column information
+- [ ] Generation of source maps (uncertain, might slow down parsers too much if it cannot run separately nicely)
+- [ ] Create a cmd to pack webfiles (much like webpack), ie. merging CSS and JS files, inlining small external files, minification and gzipping. This would work on HTML files.
+
+## Prologue
+Minifiers or bindings to minifiers exist in almost all programming languages. Some implementations are merely using several regular expressions to trim whitespace and comments (even though regex for parsing HTML/XML is ill-advised, for a good read see [Regular Expressions: Now You Have Two Problems](http://blog.codinghorror.com/regular-expressions-now-you-have-two-problems/)). Some implementations are much more profound, such as the [YUI Compressor](http://yui.github.io/yuicompressor/) and [Google Closure Compiler](https://github.com/google/closure-compiler) for JS. As most existing implementations either use JavaScript, use regexes, and don't focus on performance, they are pretty slow.
+
+This minifier proves to be that fast and extensive minifier that can handle HTML and any other filetype it may contain (CSS, JS, ...). It is usually orders of magnitude faster than existing minifiers.
+
+## Installation
+Make sure you have [Git](https://git-scm.com/) and [Go](https://golang.org/dl/) (1.13 or higher) installed, run
+```
+mkdir Project
+cd Project
+go mod init
+go get -u github.com/tdewolff/minify/v2
+```
+
+Then add the following imports to be able to use the various minifiers
+``` go
+import (
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/minify/v2/css"
+ "github.com/tdewolff/minify/v2/html"
+ "github.com/tdewolff/minify/v2/js"
+ "github.com/tdewolff/minify/v2/json"
+ "github.com/tdewolff/minify/v2/svg"
+ "github.com/tdewolff/minify/v2/xml"
+)
+```
+
+You can optionally run `go mod tidy` to clean up the `go.mod` and `go.sum` files.
+
+See [CLI tool](https://github.com/tdewolff/minify/tree/master/cmd/minify) for installation instructions of the binary.
+
+### Docker
+
+If you want to use Docker, please see https://hub.docker.com/r/tdewolff/minify.
+
+```bash
+$ docker run -it tdewolff/minify --help
+```
+
+## API stability
+There is no guarantee for absolute stability, but I take issues and bugs seriously and don't take API changes lightly. The library will be maintained in a compatible way unless vital bugs prevent me from doing so. There has been one API change after v1 which added options support and I took the opportunity to push through some more API clean up as well. There are no plans whatsoever for future API changes.
+
+## Testing
+For all subpackages and the imported `parse` package, test coverage of 100% is pursued. Besides full coverage, the minifiers are [fuzz tested](https://github.com/tdewolff/fuzz) using [github.com/dvyukov/go-fuzz](http://www.github.com/dvyukov/go-fuzz), see [the wiki](https://github.com/tdewolff/minify/wiki) for the most important bugs found by fuzz testing. These tests ensure that everything works as intended and that the code does not crash (whatever the input). If you still encounter a bug, please file a [bug report](https://github.com/tdewolff/minify/issues)!
+
+## Performance
+The benchmarks directory contains a number of standardized samples used to compare performance between changes. To give an indication of the speed of this library, I've ran the tests on my Thinkpad T460 (i5-6300U quad-core 2.4GHz running Arch Linux) using Go 1.15.
+
+```
+name time/op
+CSS/sample_bootstrap.css-4 2.70ms ± 0%
+CSS/sample_gumby.css-4 3.57ms ± 0%
+CSS/sample_fontawesome.css-4 767µs ± 0%
+CSS/sample_normalize.css-4 85.5µs ± 0%
+HTML/sample_amazon.html-4 15.2ms ± 0%
+HTML/sample_bbc.html-4 3.90ms ± 0%
+HTML/sample_blogpost.html-4 420µs ± 0%
+HTML/sample_es6.html-4 15.6ms ± 0%
+HTML/sample_stackoverflow.html-4 3.73ms ± 0%
+HTML/sample_wikipedia.html-4 6.60ms ± 0%
+JS/sample_ace.js-4 28.7ms ± 0%
+JS/sample_dot.js-4 357µs ± 0%
+JS/sample_jquery.js-4 10.0ms ± 0%
+JS/sample_jqueryui.js-4 20.4ms ± 0%
+JS/sample_moment.js-4 3.47ms ± 0%
+JSON/sample_large.json-4 3.25ms ± 0%
+JSON/sample_testsuite.json-4 1.74ms ± 0%
+JSON/sample_twitter.json-4 24.2µs ± 0%
+SVG/sample_arctic.svg-4 34.7ms ± 0%
+SVG/sample_gopher.svg-4 307µs ± 0%
+SVG/sample_usa.svg-4 57.4ms ± 0%
+SVG/sample_car.svg-4 18.0ms ± 0%
+SVG/sample_tiger.svg-4 5.61ms ± 0%
+XML/sample_books.xml-4 54.7µs ± 0%
+XML/sample_catalog.xml-4 33.0µs ± 0%
+XML/sample_omg.xml-4 7.17ms ± 0%
+
+name speed
+CSS/sample_bootstrap.css-4 50.7MB/s ± 0%
+CSS/sample_gumby.css-4 52.1MB/s ± 0%
+CSS/sample_fontawesome.css-4 61.2MB/s ± 0%
+CSS/sample_normalize.css-4 70.8MB/s ± 0%
+HTML/sample_amazon.html-4 31.1MB/s ± 0%
+HTML/sample_bbc.html-4 29.5MB/s ± 0%
+HTML/sample_blogpost.html-4 49.8MB/s ± 0%
+HTML/sample_es6.html-4 65.6MB/s ± 0%
+HTML/sample_stackoverflow.html-4 55.0MB/s ± 0%
+HTML/sample_wikipedia.html-4 67.5MB/s ± 0%
+JS/sample_ace.js-4 22.4MB/s ± 0%
+JS/sample_dot.js-4 14.5MB/s ± 0%
+JS/sample_jquery.js-4 24.8MB/s ± 0%
+JS/sample_jqueryui.js-4 23.0MB/s ± 0%
+JS/sample_moment.js-4 28.6MB/s ± 0%
+JSON/sample_large.json-4 234MB/s ± 0%
+JSON/sample_testsuite.json-4 394MB/s ± 0%
+JSON/sample_twitter.json-4 63.0MB/s ± 0%
+SVG/sample_arctic.svg-4 42.4MB/s ± 0%
+SVG/sample_gopher.svg-4 19.0MB/s ± 0%
+SVG/sample_usa.svg-4 17.8MB/s ± 0%
+SVG/sample_car.svg-4 29.3MB/s ± 0%
+SVG/sample_tiger.svg-4 12.2MB/s ± 0%
+XML/sample_books.xml-4 81.0MB/s ± 0%
+XML/sample_catalog.xml-4 58.6MB/s ± 0%
+XML/sample_omg.xml-4 159MB/s ± 0%
+```
+
+## HTML
+
+HTML (with JS and CSS) minification typically shaves off about 10%.
+
+The HTML5 minifier uses these minifications:
+
+- strip unnecessary whitespace and otherwise collapse it to one space (or newline if it originally contained a newline)
+- strip superfluous quotes, or uses single/double quotes whichever requires fewer escapes
+- strip default attribute values and attribute boolean values
+- strip some empty attributes
+- strip unrequired tags (`html`, `head`, `body`, ...)
+- strip unrequired end tags (`tr`, `td`, `li`, ... and often `p`)
+- strip default protocols (`http:`, `https:` and `javascript:`)
+- strip all comments (including conditional comments, old IE versions are not supported anymore by Microsoft)
+- shorten `doctype` and `meta` charset
+- lowercase tags, attributes and some values to enhance gzip compression
+
+Options:
+
+- `KeepConditionalComments` preserve all IE conditional comments such as `<!--[if IE 6]><![endif]-->` and `<![if IE 6]><![endif]>`, see https://msdn.microsoft.com/en-us/library/ms537512(v=vs.85).aspx#syntax
+- `KeepDefaultAttrVals` preserve default attribute values such as `<script type="application/javascript">`
+- `KeepDocumentTags` preserve `html`, `head` and `body` tags
+- `KeepEndTags` preserve all end tags
+- `KeepQuotes` preserve quotes around attribute values
+- `KeepWhitespace` preserve whitespace between inline tags but still collapse multiple whitespace characters into one
+
+After recent benchmarking and profiling it became really fast and minifies pages in the 10ms range, making it viable for on-the-fly minification.
+
+However, be careful when doing on-the-fly minification. Minification typically trims off 10% and does this at worst around about 20MB/s. This means users have to download slower than 2MB/s to make on-the-fly minification worthwhile. This may or may not apply in your situation. Rather use caching!
+
+### Whitespace removal
+The whitespace removal mechanism collapses all sequences of whitespace (spaces, newlines, tabs) to a single space. If the sequence contained a newline or carriage return it will collapse into a newline character instead. It trims all text parts (in between tags) depending on whether it was preceded by a space from a previous piece of text and whether it is followed up by a block element or an inline element. In the former case we can omit spaces while for inline elements whitespace has significance.
+
+Make sure your HTML doesn't depend on whitespace between `block` elements that have been changed to `inline` or `inline-block` elements using CSS. Your layout *should not* depend on those whitespaces as the minifier will remove them. An example is a menu consisting of multiple `<li>` that have `display:inline-block` applied and have whitespace in between them. It is bad practise to rely on whitespace for element positioning anyways!
+
+## CSS
+
+Minification typically shaves off about 10%-15%. This CSS minifier will _not_ do structural changes to your stylesheets. Although this could result in smaller files, the complexity is quite high and the risk of breaking website is high too.
+
+The CSS minifier will only use safe minifications:
+
+- remove comments and unnecessary whitespace (but keep `/*! ... */` which usually contains the license)
+- remove trailing semicolons
+- optimize `margin`, `padding` and `border-width` number of sides
+- shorten numbers by removing unnecessary `+` and zeros and rewriting with/without exponent
+- remove dimension and percentage for zero values
+- remove quotes for URLs
+- remove quotes for font families and make lowercase
+- rewrite hex colors to/from color names, or to three digit hex
+- rewrite `rgb(`, `rgba(`, `hsl(` and `hsla(` colors to hex or name
+- use four digit hex for alpha values (`transparent` &#8594; `#0000`)
+- replace `normal` and `bold` by numbers for `font-weight` and `font`
+- replace `none` &#8594; `0` for `border`, `background` and `outline`
+- lowercase all identifiers except classes, IDs and URLs to enhance gzip compression
+- shorten MS alpha function
+- rewrite data URIs with base64 or ASCII whichever is shorter
+- calls minifier for data URI mediatypes, thus you can compress embedded SVG files if you have that minifier attached
+- shorten aggregate declarations such as `background` and `font`
+
+It does purposely not use the following techniques:
+
+- (partially) merge rulesets
+- (partially) split rulesets
+- collapse multiple declarations when main declaration is defined within a ruleset (don't put `font-weight` within an already existing `font`, too complex)
+- remove overwritten properties in ruleset (this not always overwrites it, for example with `!important`)
+- rewrite properties into one ruleset if possible (like `margin-top`, `margin-right`, `margin-bottom` and `margin-left` &#8594; `margin`)
+- put nested ID selector at the front (`body > div#elem p` &#8594; `#elem p`)
+- rewrite attribute selectors for IDs and classes (`div[id=a]` &#8594; `div#a`)
+- put space after pseudo-selectors (IE6 is old, move on!)
+
+There are a couple of comparison tables online, such as [CSS Minifier Comparison](http://www.codenothing.com/benchmarks/css-compressor-3.0/full.html), [CSS minifiers comparison](http://www.phpied.com/css-minifiers-comparison/) and [CleanCSS tests](http://goalsmashers.github.io/css-minification-benchmark/). Comparing speed between each, this minifier will usually be between 10x-300x faster than existing implementations, and even rank among the top for minification ratios. It falls short with the purposely not implemented and often unsafe techniques.
+
+Options:
+
+- `KeepCSS2` prohibits using CSS3 syntax (such as exponents in numbers, or `rgba(` &#8594; `rgb(`), might be incomplete
+- `Precision` number of significant digits to preserve for numbers, `0` means no trimming
+
+## JS
+
+The JS minifier typically shaves off about 35% -- 65% of filesize depening on the file, which is a compression close to many other minifiers. Common speeds of PHP and JS implementations are about 100-300kB/s (see [Uglify2](http://lisperator.net/uglifyjs/), [Adventures in PHP web asset minimization](https://www.happyassassin.net/2014/12/29/adventures-in-php-web-asset-minimization/)). This implementation is orders of magnitude faster at around ~25MB/s.
+
+The following features are implemented:
+
+- remove superfluous whitespace
+- remove superfluous semicolons
+- shorten `true`, `false`, and `undefined` to `!0`, `!1` and `void 0`
+- rename variables and functions to shorter names (not in global scope)
+- move `var` declarations to the top of the global/function scope (if more than one)
+- collapse if/else statements to expressions
+- minify conditional expressions to simpler ones
+- merge sequential expression statements to one, including into `return` and `throw`
+- remove superfluous grouping in expressions
+- shorten or remove string escapes
+- convert object key or index expression from string to identifier or decimal
+- merge concatenated strings
+- rewrite numbers (binary, octal, decimal, hexadecimal) to shorter representations
+
+Options:
+
+- `KeepVarNames` keeps variable names as they are and omits shortening variable names
+- `Precision` number of significant digits to preserve for numbers, `0` means no trimming
+- `Version` ECMAScript version to use for output, `0` is the latest
+
+### Comparison with other tools
+
+Performance is measured with `time [command]` ran 10 times and selecting the fastest one, on a Thinkpad T460 (i5-6300U quad-core 2.4GHz running Arch Linux) using Go 1.15.
+
+- [minify](https://github.com/tdewolff/minify): `minify -o script.min.js script.js`
+- [esbuild](https://github.com/evanw/esbuild): `esbuild --minify --outfile=script.min.js script.js`
+- [terser](https://github.com/terser/terser): `terser script.js --compress --mangle -o script.min.js`
+- [UglifyJS](https://github.com/Skalman/UglifyJS-online): `uglifyjs --compress --mangle -o script.min.js script.js`
+- [Closure Compiler](https://github.com/google/closure-compiler): `closure-compiler -O SIMPLE --js script.js --js_output_file script.min.js --language_in ECMASCRIPT_NEXT -W QUIET --jscomp_off=checkVars` optimization level `SIMPLE` instead of `ADVANCED` to make similar assumptions as do the other tools (do not rename/assume anything of global level variables)
+
+#### Compression ratio (lower is better)
+All tools give very similar results, although UglifyJS compresses slightly better.
+
+| Tool | ace.js | dot.js | jquery.js | jqueryui.js | moment.js |
+| --- | --- | --- | --- | --- | --- |
+| **minify** | 53.7% | 64.8% | 34.2% | 51.3% | 34.8% |
+| esbuild | 53.8% | 66.3% | 34.4% | 53.1% | 34.8% |
+| terser | 53.2% | 65.2% | 34.2% | 51.8% | 34.7% |
+| UglifyJS | 53.1% | 64.7% | 33.8% | 50.7% | 34.2% |
+| Closure Compiler | 53.4% | 64.0% | 35.7% | 53.6% | 34.3% |
+
+#### Time (lower is better)
+Most tools are extremely slow, with `minify` and `esbuild` being orders of magnitudes faster.
+
+| Tool | ace.js | dot.js | jquery.js | jqueryui.js | moment.js |
+| --- | --- | --- | --- | --- | --- |
+| **minify** | 49ms | 5ms | 22ms | 35ms | 13ms |
+| esbuild | 64ms | 9ms | 31ms | 51ms | 17ms |
+| terser | 2900s | 180ms | 1400ms | 2200ms | 730ms |
+| UglifyJS | 3900ms | 210ms | 2000ms | 3100ms | 910ms |
+| Closure Compiler | 6100ms | 2500ms | 4400ms | 5300ms | 3500ms |
+
+## JSON
+
+Minification typically shaves off about 15% of filesize for common indented JSON such as generated by [JSON Generator](http://www.json-generator.com/).
+
+The JSON minifier only removes whitespace, which is the only thing that can be left out, and minifies numbers (`1000` => `1e3`).
+
+Options:
+
+- `Precision` number of significant digits to preserve for numbers, `0` means no trimming
+- `KeepNumbers` do not minify numbers if set to `true`, by default numbers will be minified
+
+## SVG
+
+The SVG minifier uses these minifications:
+
+- trim and collapse whitespace between all tags
+- strip comments, empty `doctype`, XML prelude, `metadata`
+- strip SVG version
+- strip CDATA sections wherever possible
+- collapse tags with no content to a void tag
+- minify style tag and attributes with the CSS minifier
+- minify colors
+- shorten lengths and numbers and remove default `px` unit
+- shorten `path` data
+- use relative or absolute positions in path data whichever is shorter
+
+TODO:
+- convert attributes to style attribute whenever shorter
+- merge path data? (same style and no intersection -- the latter is difficult)
+
+Options:
+
+- `Precision` number of significant digits to preserve for numbers, `0` means no trimming
+
+## XML
+
+The XML minifier uses these minifications:
+
+- strip unnecessary whitespace and otherwise collapse it to one space (or newline if it originally contained a newline)
+- strip comments
+- collapse tags with no content to a void tag
+- strip CDATA sections wherever possible
+
+Options:
+
+- `KeepWhitespace` preserve whitespace between inline tags but still collapse multiple whitespace characters into one
+
+## Usage
+Any input stream is being buffered by the minification functions. This is how the underlying buffer package inherently works to ensure high performance. The output stream however is not buffered. It is wise to preallocate a buffer as big as the input to which the output is written, or otherwise use `bufio` to buffer to a streaming writer.
+
+### New
+Retrieve a minifier struct which holds a map of mediatype &#8594; minifier functions.
+``` go
+m := minify.New()
+```
+
+The following loads all provided minifiers.
+``` go
+m := minify.New()
+m.AddFunc("text/css", css.Minify)
+m.AddFunc("text/html", html.Minify)
+m.AddFunc("image/svg+xml", svg.Minify)
+m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
+m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
+m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)
+```
+
+You can set options to several minifiers.
+``` go
+m.Add("text/html", &html.Minifier{
+ KeepDefaultAttrVals: true,
+ KeepWhitespace: true,
+})
+```
+
+### From reader
+Minify from an `io.Reader` to an `io.Writer` for a specific mediatype.
+``` go
+if err := m.Minify(mediatype, w, r); err != nil {
+ panic(err)
+}
+```
+
+### From bytes
+Minify from and to a `[]byte` for a specific mediatype.
+``` go
+b, err = m.Bytes(mediatype, b)
+if err != nil {
+ panic(err)
+}
+```
+
+### From string
+Minify from and to a `string` for a specific mediatype.
+``` go
+s, err = m.String(mediatype, s)
+if err != nil {
+ panic(err)
+}
+```
+
+### To reader
+Get a minifying reader for a specific mediatype.
+``` go
+mr := m.Reader(mediatype, r)
+if _, err := mr.Read(b); err != nil {
+ panic(err)
+}
+```
+
+### To writer
+Get a minifying writer for a specific mediatype. Must be explicitly closed because it uses an `io.Pipe` underneath.
+``` go
+mw := m.Writer(mediatype, w)
+if mw.Write([]byte("input")); err != nil {
+ panic(err)
+}
+if err := mw.Close(); err != nil {
+ panic(err)
+}
+```
+
+### Middleware
+Minify resources on the fly using middleware. It passes a wrapped response writer to the handler that removes the Content-Length header. The minifier is chosen based on the Content-Type header or, if the header is empty, by the request URI file extension. This is on-the-fly processing, you should preferably cache the results though!
+``` go
+fs := http.FileServer(http.Dir("www/"))
+http.Handle("/", m.Middleware(fs))
+```
+
+### Custom minifier
+Add a minifier for a specific mimetype.
+``` go
+type CustomMinifier struct {
+ KeepLineBreaks bool
+}
+
+func (c *CustomMinifier) Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ // ...
+ return nil
+}
+
+m.Add(mimetype, &CustomMinifier{KeepLineBreaks: true})
+// or
+m.AddRegexp(regexp.MustCompile("/x-custom$"), &CustomMinifier{KeepLineBreaks: true})
+```
+
+Add a minify function for a specific mimetype.
+``` go
+m.AddFunc(mimetype, func(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ // ...
+ return nil
+})
+m.AddFuncRegexp(regexp.MustCompile("/x-custom$"), func(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ // ...
+ return nil
+})
+```
+
+Add a command `cmd` with arguments `args` for a specific mimetype.
+``` go
+m.AddCmd(mimetype, exec.Command(cmd, args...))
+m.AddCmdRegexp(regexp.MustCompile("/x-custom$"), exec.Command(cmd, args...))
+```
+
+### Mediatypes
+Using the `params map[string]string` argument one can pass parameters to the minifier such as seen in mediatypes (`type/subtype; key1=val1; key2=val2`). Examples are the encoding or charset of the data. Calling `Minify` will split the mimetype and parameters for the minifiers for you, but `MinifyMimetype` can be used if you already have them split up.
+
+Minifiers can also be added using a regular expression. For example a minifier with `image/.*` will match any image mime.
+
+## Examples
+### Common minifiers
+Basic example that minifies from stdin to stdout and loads the default HTML, CSS and JS minifiers. Optionally, one can enable `java -jar build/compiler.jar` to run for JS (for example the [ClosureCompiler](https://code.google.com/p/closure-compiler/)). Note that reading the file into a buffer first and writing to a pre-allocated buffer would be faster (but would disable streaming).
+``` go
+package main
+
+import (
+ "os"
+ "regexp"
+
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/minify/v2/css"
+ "github.com/tdewolff/minify/v2/html"
+ "github.com/tdewolff/minify/v2/js"
+ "github.com/tdewolff/minify/v2/json"
+ "github.com/tdewolff/minify/v2/svg"
+ "github.com/tdewolff/minify/v2/xml"
+)
+
+func main() {
+ m := minify.New()
+ m.AddFunc("text/css", css.Minify)
+ m.AddFunc("text/html", html.Minify)
+ m.AddFunc("image/svg+xml", svg.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)
+
+ if err := m.Minify("text/html", os.Stdout, os.Stdin); err != nil {
+ panic(err)
+ }
+}
+```
+
+### External minifiers
+Below are some examples of using common external minifiers.
+
+#### Closure Compiler
+See [Closure Compiler Application](https://developers.google.com/closure/compiler/docs/gettingstarted_app). Not tested.
+
+``` go
+m.AddCmdRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"),
+ exec.Command("java", "-jar", "build/compiler.jar"))
+```
+
+#### UglifyJS
+See [UglifyJS](https://github.com/mishoo/UglifyJS2).
+
+``` go
+m.AddCmdRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"),
+ exec.Command("uglifyjs"))
+```
+
+#### esbuild
+See [esbuild](https://github.com/evanw/esbuild).
+
+``` go
+m.AddCmdRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"),
+ exec.Command("esbuild", "$in.js", "--minify", "--outfile=$out.js"))
+```
+
+### <a name="custom-minifier-example"></a> Custom minifier
+Custom minifier showing an example that implements the minifier function interface. Within a custom minifier, it is possible to call any other minifier function recursively (through the `m *minify.M` parameter) when dealing with embedded resources.
+``` go
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+
+ "github.com/tdewolff/minify/v2"
+)
+
+func main() {
+ m := minify.New()
+ m.AddFunc("text/plain", func(m *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
+ // remove newlines and spaces
+ rb := bufio.NewReader(r)
+ for {
+ line, err := rb.ReadString('\n')
+ if err != nil && err != io.EOF {
+ return err
+ }
+ if _, errws := io.WriteString(w, strings.Replace(line, " ", "", -1)); errws != nil {
+ return errws
+ }
+ if err == io.EOF {
+ break
+ }
+ }
+ return nil
+ })
+
+ in := "Because my coffee was too cold, I heated it in the microwave."
+ out, err := m.String("text/plain", in)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(out)
+ // Output: Becausemycoffeewastoocold,Iheateditinthemicrowave.
+}
+```
+
+### ResponseWriter
+#### Middleware
+``` go
+func main() {
+ m := minify.New()
+ m.AddFunc("text/css", css.Minify)
+ m.AddFunc("text/html", html.Minify)
+ m.AddFunc("image/svg+xml", svg.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)
+
+ fs := http.FileServer(http.Dir("www/"))
+ http.Handle("/", m.MiddlewareWithError(fs))
+}
+
+func handleError(w http.ResponseWriter, r *http.Request, err error) {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+}
+```
+
+In order to properly handle minify errors, it is necessary to close the response writer since all writes are concurrently handled. There is no need to check errors on writes since they will be returned on closing.
+
+```go
+func main() {
+ m := minify.New()
+ m.AddFunc("text/html", html.Minify)
+ m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
+
+ input := `<script>const i = 1_000_</script>` // Faulty JS
+ req := httptest.NewRequest(http.MethodGet, "/", nil)
+ rec := httptest.NewRecorder()
+ m.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ _, _ = w.Write([]byte(input))
+
+ if err := w.(io.Closer).Close(); err != nil {
+ panic(err)
+ }
+ })).ServeHTTP(rec, req)
+}
+```
+
+#### ResponseWriter
+``` go
+func Serve(w http.ResponseWriter, r *http.Request) {
+ mw := m.ResponseWriter(w, r)
+ defer mw.Close()
+ w = mw
+
+ http.ServeFile(w, r, path.Join("www", r.URL.Path))
+}
+```
+
+#### Custom response writer
+ResponseWriter example which returns a ResponseWriter that minifies the content and then writes to the original ResponseWriter. Any write after applying this filter will be minified.
+``` go
+type MinifyResponseWriter struct {
+ http.ResponseWriter
+ io.WriteCloser
+}
+
+func (m MinifyResponseWriter) Write(b []byte) (int, error) {
+ return m.WriteCloser.Write(b)
+}
+
+// MinifyResponseWriter must be closed explicitly by calling site.
+func MinifyFilter(mediatype string, res http.ResponseWriter) MinifyResponseWriter {
+ m := minify.New()
+ // add minifiers
+
+ mw := m.Writer(mediatype, res)
+ return MinifyResponseWriter{res, mw}
+}
+```
+
+``` go
+// Usage
+func(w http.ResponseWriter, req *http.Request) {
+ w = MinifyFilter("text/html", w)
+ if _, err := io.WriteString(w, "<p class=\"message\"> This HTTP response will be minified. </p>"); err != nil {
+ panic(err)
+ }
+ if err := w.Close(); err != nil {
+ panic(err)
+ }
+ // Output: <p class=message>This HTTP response will be minified.
+}
+```
+
+### Templates
+
+Here's an example of a replacement for `template.ParseFiles` from `template/html`, which automatically minifies each template before parsing it.
+
+Be aware that minifying templates will work in most cases but not all. Because the HTML minifier only works for valid HTML5, your template must itself be valid HTML5. Template tags are parsed as regular text by the minifier.
+
+``` go
+func compileTemplates(filenames ...string) (*template.Template, error) {
+ m := minify.New()
+ m.AddFunc("text/html", html.Minify)
+
+ var tmpl *template.Template
+ for _, filename := range filenames {
+ name := filepath.Base(filename)
+ if tmpl == nil {
+ tmpl = template.New(name)
+ } else {
+ tmpl = tmpl.New(name)
+ }
+
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ mb, err := m.Bytes("text/html", b)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := tmpl.Parse(string(mb)); err != nil {
+ return nil, err
+ }
+ }
+ return tmpl, nil
+}
+```
+
+Example usage:
+
+``` go
+templates := template.Must(compileTemplates("view.html", "home.html"))
+```
+
+## FAQ
+### Newlines remain in minified output
+While you might expect the minified output to be on a single line for it to be fully minified, this is not true. In many cases, using a literal newline doesn't affect the file size, and in some cases it may even reduce the file size.
+
+A typical example is HTML. Whitespace is significant in HTML, meaning that spaces and newlines between or around tags may affect how they are displayed. There is no distinction between a space or a newline and they may be interchanged without affecting the displayed HTML. Remember that a space (0x20) and a newline (0x0A) are both one byte long, so that there is no difference in file size when interchanging them. This minifier removes unnecessary whitespace by replacing stretches of spaces and newlines by a single whitespace character. Specifically, if the stretch of white space characters contains a newline, it will replace it by a newline and otherwise by a space. This doesn't affect the file size, but may help somewhat for debugging or file transmission objectives.
+
+Another example is JavaScript. Single or double quoted string literals may not contain newline characters but instead need to escape them as `\n`. These are two bytes instead of a single newline byte. Using template literals it is allowed to have literal newline characters and we can use that fact to shave-off one byte! The result is that the minified output contains newlines instead of escaped newline characters, which makes the final file size smaller. Of course, changing from single or double quotes to template literals depends on other factors as well, and this minifier makes a calculation whether the template literal results in a shorter file size or not before converting a string literal.
+
+## License
+Released under the [MIT license](LICENSE.md).
+
+[1]: http://golang.org/ "Go Language"
diff --git a/vendor/github.com/tdewolff/minify/v2/common.go b/vendor/github.com/tdewolff/minify/v2/common.go
new file mode 100644
index 0000000..3773a9b
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/common.go
@@ -0,0 +1,524 @@
+package minify
+
+import (
+ "bytes"
+ "encoding/base64"
+
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/strconv"
+)
+
+var (
+ textMimeBytes = []byte("text/plain")
+ charsetASCIIBytes = []byte("charset=us-ascii")
+ dataBytes = []byte("data:")
+ base64Bytes = []byte(";base64")
+)
+
+// Epsilon is the closest number to zero that is not considered to be zero.
+var Epsilon = 0.00001
+
+// Mediatype minifies a given mediatype by removing all whitespace and lowercasing all parts except strings (which may be case sensitive).
+func Mediatype(b []byte) []byte {
+ j := 0
+ inString := false
+ start, lastString := 0, 0
+ for i, c := range b {
+ if !inString && parse.IsWhitespace(c) {
+ if start != 0 {
+ j += copy(b[j:], b[start:i])
+ } else {
+ j += i
+ }
+ start = i + 1
+ } else if c == '"' {
+ inString = !inString
+ if inString {
+ if i-lastString < 1024 { // ToLower may otherwise slow down minification greatly
+ parse.ToLower(b[lastString:i])
+ }
+ } else {
+ lastString = j + (i + 1 - start)
+ }
+ }
+ }
+ if start != 0 {
+ j += copy(b[j:], b[start:])
+ parse.ToLower(b[lastString:j])
+ return b[:j]
+ }
+ parse.ToLower(b[lastString:])
+ return b
+}
+
+// DataURI minifies a data URI and calls a minifier by the specified mediatype. Specifications: https://www.ietf.org/rfc/rfc2397.txt.
+func DataURI(m *M, dataURI []byte) []byte {
+ origData := parse.Copy(dataURI)
+ mediatype, data, err := parse.DataURI(dataURI)
+ if err != nil {
+ return dataURI
+ }
+
+ data, _ = m.Bytes(string(mediatype), data)
+ base64Len := len(";base64") + base64.StdEncoding.EncodedLen(len(data))
+ asciiLen := len(data)
+ for _, c := range data {
+ if parse.DataURIEncodingTable[c] {
+ asciiLen += 2
+ }
+ if asciiLen > base64Len {
+ break
+ }
+ }
+ if len(origData) < base64Len && len(origData) < asciiLen {
+ return origData
+ }
+ if base64Len < asciiLen {
+ encoded := make([]byte, base64Len-len(";base64"))
+ base64.StdEncoding.Encode(encoded, data)
+ data = encoded
+ mediatype = append(mediatype, base64Bytes...)
+ } else {
+ data = parse.EncodeURL(data, parse.DataURIEncodingTable)
+ }
+ if len("text/plain") <= len(mediatype) && parse.EqualFold(mediatype[:len("text/plain")], textMimeBytes) {
+ mediatype = mediatype[len("text/plain"):]
+ }
+ for i := 0; i+len(";charset=us-ascii") <= len(mediatype); i++ {
+ // must start with semicolon and be followed by end of mediatype or semicolon
+ if mediatype[i] == ';' && parse.EqualFold(mediatype[i+1:i+len(";charset=us-ascii")], charsetASCIIBytes) && (i+len(";charset=us-ascii") >= len(mediatype) || mediatype[i+len(";charset=us-ascii")] == ';') {
+ mediatype = append(mediatype[:i], mediatype[i+len(";charset=us-ascii"):]...)
+ break
+ }
+ }
+ return append(append(append(dataBytes, mediatype...), ','), data...)
+}
+
+// MaxInt is the maximum value of int.
+const MaxInt = int(^uint(0) >> 1)
+
+// MinInt is the minimum value of int.
+const MinInt = -MaxInt - 1
+
+// Decimal minifies a given byte slice containing a decimal and removes superfluous characters. It differs from Number in that it does not parse or output exponents.
+// prec is the number of significant digits. When prec is zero it will keep all digits. Only digits after the dot can be removed to reach the number of significant digits. Very large numbers may thus have more significant digits.
+func Decimal(num []byte, prec int) []byte {
+ if len(num) <= 1 {
+ return num
+ }
+
+ // omit first + and register mantissa start and end, whether it's negative and the exponent
+ neg := false
+ start := 0
+ dot := -1
+ end := len(num)
+ if 0 < end && (num[0] == '+' || num[0] == '-') {
+ if num[0] == '-' {
+ neg = true
+ }
+ start++
+ }
+ for i, c := range num[start:] {
+ if c == '.' {
+ dot = start + i
+ break
+ }
+ }
+ if dot == -1 {
+ dot = end
+ }
+
+ // trim leading zeros but leave at least one digit
+ for start < end-1 && num[start] == '0' {
+ start++
+ }
+ // trim trailing zeros
+ i := end - 1
+ for ; dot < i; i-- {
+ if num[i] != '0' {
+ end = i + 1
+ break
+ }
+ }
+ if i == dot {
+ end = dot
+ if start == end {
+ num[start] = '0'
+ return num[start : start+1]
+ }
+ } else if start == end-1 && num[start] == '0' {
+ return num[start:end]
+ }
+
+ // apply precision
+ if 0 < prec && dot <= start+prec {
+ precEnd := start + prec + 1 // include dot
+ if dot == start { // for numbers like .012
+ digit := start + 1
+ for digit < end && num[digit] == '0' {
+ digit++
+ }
+ precEnd = digit + prec
+ }
+ if precEnd < end {
+ end = precEnd
+
+ // process either an increase from a lesser significant decimal (>= 5)
+ // or remove trailing zeros after the dot, or both
+ i := end - 1
+ inc := '5' <= num[end]
+ for ; start < i; i-- {
+ if i == dot {
+ // no-op
+ } else if inc && num[i] != '9' {
+ num[i]++
+ inc = false
+ break
+ } else if inc && i < dot { // end inc for integer
+ num[i] = '0'
+ } else if !inc && (i < dot || num[i] != '0') {
+ break
+ }
+ }
+ if i < dot {
+ end = dot
+ } else {
+ end = i + 1
+ }
+
+ if inc {
+ if dot == start && end == start+1 {
+ num[start] = '1'
+ } else if num[start] == '9' {
+ num[start] = '1'
+ num[start+1] = '0'
+ end++
+ } else {
+ num[start]++
+ }
+ }
+ }
+ }
+
+ if neg {
+ start--
+ num[start] = '-'
+ }
+ return num[start:end]
+}
+
+// Number minifies a given byte slice containing a number and removes superfluous characters.
+func Number(num []byte, prec int) []byte {
+ if len(num) <= 1 {
+ return num
+ }
+
+ // omit first + and register mantissa start and end, whether it's negative and the exponent
+ neg := false
+ start := 0
+ dot := -1
+ end := len(num)
+ origExp := 0
+ if num[0] == '+' || num[0] == '-' {
+ if num[0] == '-' {
+ neg = true
+ }
+ start++
+ }
+ for i, c := range num[start:] {
+ if c == '.' {
+ dot = start + i
+ } else if c == 'e' || c == 'E' {
+ end = start + i
+ i += start + 1
+ if i < len(num) && num[i] == '+' {
+ i++
+ }
+ if tmpOrigExp, n := strconv.ParseInt(num[i:]); 0 < n && int64(MinInt) <= tmpOrigExp && tmpOrigExp <= int64(MaxInt) {
+ // range checks for when int is 32 bit
+ origExp = int(tmpOrigExp)
+ } else {
+ return num
+ }
+ break
+ }
+ }
+ if dot == -1 {
+ dot = end
+ }
+
+ // trim leading zeros but leave at least one digit
+ for start < end-1 && num[start] == '0' {
+ start++
+ }
+ // trim trailing zeros
+ i := end - 1
+ for ; dot < i; i-- {
+ if num[i] != '0' {
+ end = i + 1
+ break
+ }
+ }
+ if i == dot {
+ end = dot
+ if start == end {
+ num[start] = '0'
+ return num[start : start+1]
+ }
+ } else if start == end-1 && num[start] == '0' {
+ return num[start:end]
+ }
+
+ // apply precision
+ if 0 < prec { //&& (dot <= start+prec || start+prec+1 < dot || 0 < origExp) { // don't minify 9 to 10, but do 999 to 1e3 and 99e1 to 1e3
+ precEnd := start + prec
+ if dot == start { // for numbers like .012
+ digit := start + 1
+ for digit < end && num[digit] == '0' {
+ digit++
+ }
+ precEnd = digit + prec
+ } else if dot < precEnd { // for numbers where precision will include the dot
+ precEnd++
+ }
+ if precEnd < end && (dot < end || 1 < dot-precEnd+origExp) { // do not minify 9=>10 or 99=>100 or 9e1=>1e2 (but 90), but 999=>1e3 and 99e1=>1e3
+ end = precEnd
+ inc := '5' <= num[end]
+ if dot == end {
+ inc = end+1 < len(num) && '5' <= num[end+1]
+ }
+ if precEnd < dot {
+ origExp += dot - precEnd
+ dot = precEnd
+ }
+ // process either an increase from a lesser significant decimal (>= 5)
+ // and remove trailing zeros
+ i := end - 1
+ for ; start < i; i-- {
+ if i == dot {
+ // no-op
+ } else if inc && num[i] != '9' {
+ num[i]++
+ inc = false
+ break
+ } else if !inc && num[i] != '0' {
+ break
+ }
+ }
+ end = i + 1
+ if end < dot {
+ origExp += dot - end
+ dot = end
+ }
+ if inc { // single digit left
+ if dot == start {
+ num[start] = '1'
+ dot = start + 1
+ } else if num[start] == '9' {
+ num[start] = '1'
+ origExp++
+ } else {
+ num[start]++
+ }
+ }
+ }
+ }
+
+ // n is the number of significant digits
+ // normExp would be the exponent if it were normalised (0.1 <= f < 1)
+ n := 0
+ normExp := 0
+ if dot == start {
+ for i = dot + 1; i < end; i++ {
+ if num[i] != '0' {
+ n = end - i
+ normExp = dot - i + 1
+ break
+ }
+ }
+ } else if dot == end {
+ normExp = end - start
+ for i = end - 1; start <= i; i-- {
+ if num[i] != '0' {
+ n = i + 1 - start
+ end = i + 1
+ break
+ }
+ }
+ } else {
+ n = end - start - 1
+ normExp = dot - start
+ }
+
+ if origExp < 0 && (normExp < MinInt-origExp || normExp-n < MinInt-origExp) || 0 < origExp && (MaxInt-origExp < normExp || MaxInt-origExp < normExp-n) {
+ return num // exponent overflow
+ }
+ normExp += origExp
+
+ // intExp would be the exponent if it were an integer
+ intExp := normExp - n
+ lenIntExp := strconv.LenInt(int64(intExp))
+ lenNormExp := strconv.LenInt(int64(normExp))
+
+ // there are three cases to consider when printing the number
+ // case 1: without decimals and with a positive exponent (large numbers: 5e4)
+ // case 2: with decimals and with a negative exponent (small numbers with many digits: .123456e-4)
+ // case 3: with decimals and without an exponent (around zero: 5.6)
+ // case 4: without decimals and with a negative exponent (small numbers: 123456e-9)
+ if n <= normExp {
+ // case 1: print number with positive exponent
+ if dot < end {
+ // remove dot, either from the front or copy the smallest part
+ if dot == start {
+ start = end - n
+ } else if dot-start < end-dot-1 {
+ copy(num[start+1:], num[start:dot])
+ start++
+ } else {
+ copy(num[dot:], num[dot+1:end])
+ end--
+ }
+ }
+ if n+3 <= normExp {
+ num[end] = 'e'
+ end++
+ for i := end + lenIntExp - 1; end <= i; i-- {
+ num[i] = byte(intExp%10) + '0'
+ intExp /= 10
+ }
+ end += lenIntExp
+ } else if n+2 == normExp {
+ num[end] = '0'
+ num[end+1] = '0'
+ end += 2
+ } else if n+1 == normExp {
+ num[end] = '0'
+ end++
+ }
+ } else if normExp < -3 && lenNormExp < lenIntExp && dot < end {
+ // case 2: print normalized number (0.1 <= f < 1)
+ zeroes := -normExp + origExp
+ if 0 < zeroes {
+ copy(num[start+1:], num[start+1+zeroes:end])
+ end -= zeroes
+ } else if zeroes < 0 {
+ copy(num[start+1:], num[start:dot])
+ num[start] = '.'
+ }
+ num[end] = 'e'
+ num[end+1] = '-'
+ end += 2
+ for i := end + lenNormExp - 1; end <= i; i-- {
+ num[i] = -byte(normExp%10) + '0'
+ normExp /= 10
+ }
+ end += lenNormExp
+ } else if -lenIntExp-1 <= normExp {
+ // case 3: print number without exponent
+ zeroes := -normExp
+ if 0 < zeroes {
+ // dot placed at the front and negative exponent, adding zeroes
+ newDot := end - n - zeroes - 1
+ if newDot != dot {
+ d := start - newDot
+ if 0 < d {
+ if dot < end {
+ // copy original digits after the dot towards the end
+ copy(num[dot+1+d:], num[dot+1:end])
+ if start < dot {
+ // copy original digits before the dot towards the end
+ copy(num[start+d+1:], num[start:dot])
+ }
+ } else if start < dot {
+ // copy original digits before the dot towards the end
+ copy(num[start+d:], num[start:dot])
+ }
+ newDot = start
+ end += d
+ } else {
+ start += -d
+ }
+ num[newDot] = '.'
+ for i := 0; i < zeroes; i++ {
+ num[newDot+1+i] = '0'
+ }
+ }
+ } else {
+ // dot placed in the middle of the number
+ if dot == start {
+ // when there are zeroes after the dot
+ dot = end - n - 1
+ start = dot
+ } else if end <= dot {
+ // when input has no dot in it
+ dot = end
+ end++
+ }
+ newDot := start + normExp
+ // move digits between dot and newDot towards the end
+ if dot < newDot {
+ copy(num[dot:], num[dot+1:newDot+1])
+ } else if newDot < dot {
+ copy(num[newDot+1:], num[newDot:dot])
+ }
+ num[newDot] = '.'
+ }
+ } else {
+ // case 4: print number with negative exponent
+ // find new end, considering moving numbers to the front, removing the dot and increasing the length of the exponent
+ newEnd := end
+ if dot == start {
+ newEnd = start + n
+ } else {
+ newEnd--
+ }
+ newEnd += 2 + lenIntExp
+
+ exp := intExp
+ lenExp := lenIntExp
+ if newEnd < len(num) {
+ // it saves space to convert the decimal to an integer and decrease the exponent
+ if dot < end {
+ if dot == start {
+ copy(num[start:], num[end-n:end])
+ end = start + n
+ } else {
+ copy(num[dot:], num[dot+1:end])
+ end--
+ }
+ }
+ } else {
+ // it does not save space and will panic, so we revert to the original representation
+ exp = origExp
+ lenExp = 1
+ if origExp <= -10 || 10 <= origExp {
+ lenExp = strconv.LenInt(int64(origExp))
+ }
+ }
+ num[end] = 'e'
+ num[end+1] = '-'
+ end += 2
+ for i := end + lenExp - 1; end <= i; i-- {
+ num[i] = -byte(exp%10) + '0'
+ exp /= 10
+ }
+ end += lenExp
+ }
+
+ if neg {
+ start--
+ num[start] = '-'
+ }
+ return num[start:end]
+}
+
+func UpdateErrorPosition(err error, input *parse.Input, offset int) error {
+ if perr, ok := err.(*parse.Error); ok {
+ r := bytes.NewBuffer(input.Bytes())
+ line, column, _ := parse.Position(r, offset)
+ perr.Line += line - 1
+ perr.Column += column - 1
+ return perr
+ }
+ return err
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/css/css.go b/vendor/github.com/tdewolff/minify/v2/css/css.go
new file mode 100644
index 0000000..4929609
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/css/css.go
@@ -0,0 +1,1549 @@
+// Package css minifies CSS3 following the specifications at http://www.w3.org/TR/css-syntax-3/.
+package css
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/css"
+ strconvParse "github.com/tdewolff/parse/v2/strconv"
+)
+
+var (
+ spaceBytes = []byte(" ")
+ colonBytes = []byte(":")
+ semicolonBytes = []byte(";")
+ commaBytes = []byte(",")
+ leftBracketBytes = []byte("{")
+ rightBracketBytes = []byte("}")
+ rightParenBytes = []byte(")")
+ urlBytes = []byte("url(")
+ zeroBytes = []byte("0")
+ oneBytes = []byte("1")
+ transparentBytes = []byte("transparent")
+ blackBytes = []byte("#0000")
+ initialBytes = []byte("initial")
+ noneBytes = []byte("none")
+ autoBytes = []byte("auto")
+ leftBytes = []byte("left")
+ topBytes = []byte("top")
+ n400Bytes = []byte("400")
+ n700Bytes = []byte("700")
+ n50pBytes = []byte("50%")
+ n100pBytes = []byte("100%")
+ repeatXBytes = []byte("repeat-x")
+ repeatYBytes = []byte("repeat-y")
+ importantBytes = []byte("!important")
+ dataSchemeBytes = []byte("data:")
+)
+
+type cssMinifier struct {
+ m *minify.M
+ w io.Writer
+ p *css.Parser
+ o *Minifier
+
+ tokenBuffer []Token
+ tokensLevel int
+}
+
+////////////////////////////////////////////////////////////////
+
+// Minifier is a CSS minifier.
+type Minifier struct {
+ KeepCSS2 bool
+ Precision int // number of significant digits
+ newPrecision int // precision for new numbers
+}
+
+// Minify minifies CSS data, it reads from r and writes to w.
+func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ return (&Minifier{}).Minify(m, w, r, params)
+}
+
+// Token is a parsed token with extra information for functions.
+type Token struct {
+ css.TokenType
+ Data []byte
+ Args []Token // only filled for functions
+ Fun, Ident Hash // only filled for functions and identifiers respectively
+}
+
+func (t Token) String() string {
+ if len(t.Args) == 0 {
+ return t.TokenType.String() + "(" + string(t.Data) + ")"
+ }
+ return fmt.Sprint(t.Args)
+}
+
+// Equal returns true if both tokens are equal.
+func (t Token) Equal(t2 Token) bool {
+ if t.TokenType == t2.TokenType && bytes.Equal(t.Data, t2.Data) && len(t.Args) == len(t2.Args) {
+ for i := 0; i < len(t.Args); i++ {
+ if t.Args[i].TokenType != t2.Args[i].TokenType || !bytes.Equal(t.Args[i].Data, t2.Args[i].Data) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// IsZero returns true if a dimension, percentage, or number token is zero.
+func (t Token) IsZero() bool {
+ // as each number is already minified, starting with a zero means it is zero
+ return (t.TokenType == css.DimensionToken || t.TokenType == css.PercentageToken || t.TokenType == css.NumberToken) && t.Data[0] == '0'
+}
+
+// IsLength returns true if the token is a length.
+func (t Token) IsLength() bool {
+ if t.TokenType == css.DimensionToken {
+ return true
+ } else if t.TokenType == css.NumberToken && t.Data[0] == '0' {
+ return true
+ } else if t.TokenType == css.FunctionToken {
+ fun := ToHash(t.Data[:len(t.Data)-1])
+ if fun == Calc || fun == Min || fun == Max || fun == Clamp || fun == Attr || fun == Var || fun == Env {
+ return true
+ }
+ }
+ return false
+}
+
+// IsLengthPercentage returns true if the token is a length or percentage token.
+func (t Token) IsLengthPercentage() bool {
+ return t.TokenType == css.PercentageToken || t.IsLength()
+}
+
+////////////////////////////////////////////////////////////////
+
+// Minify minifies CSS data, it reads from r and writes to w.
+func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ o.newPrecision = o.Precision
+ if o.newPrecision <= 0 || 15 < o.newPrecision {
+ o.newPrecision = 15 // minimum number of digits a double can represent exactly
+ }
+
+ z := parse.NewInput(r)
+ defer z.Restore()
+
+ isInline := params != nil && params["inline"] == "1"
+ c := &cssMinifier{
+ m: m,
+ w: w,
+ p: css.NewParser(z, isInline),
+ o: o,
+ }
+ c.minifyGrammar()
+
+ if _, err := w.Write(nil); err != nil {
+ return err
+ }
+ if c.p.Err() == io.EOF {
+ return nil
+ }
+ return c.p.Err()
+}
+
+func (c *cssMinifier) minifyGrammar() {
+ semicolonQueued := false
+ for {
+ gt, _, data := c.p.Next()
+ switch gt {
+ case css.ErrorGrammar:
+ if c.p.HasParseError() {
+ if semicolonQueued {
+ c.w.Write(semicolonBytes)
+ }
+
+ // write out the offending declaration (but save the semicolon)
+ vals := c.p.Values()
+ if len(vals) > 0 && vals[len(vals)-1].TokenType == css.SemicolonToken {
+ vals = vals[:len(vals)-1]
+ semicolonQueued = true
+ }
+ for _, val := range vals {
+ c.w.Write(val.Data)
+ }
+ continue
+ }
+ return
+ case css.EndAtRuleGrammar, css.EndRulesetGrammar:
+ c.w.Write(rightBracketBytes)
+ semicolonQueued = false
+ continue
+ }
+
+ if semicolonQueued {
+ c.w.Write(semicolonBytes)
+ semicolonQueued = false
+ }
+
+ switch gt {
+ case css.AtRuleGrammar:
+ c.w.Write(data)
+ values := c.p.Values()
+ if ToHash(data[1:]) == Import && len(values) == 2 && values[1].TokenType == css.URLToken && 4 < len(values[1].Data) && values[1].Data[len(values[1].Data)-1] == ')' {
+ url := values[1].Data
+ if url[4] != '"' && url[4] != '\'' {
+ a := 4
+ for parse.IsWhitespace(url[a]) || parse.IsNewline(url[a]) {
+ a++
+ }
+ b := len(url) - 2
+ for a < b && (parse.IsWhitespace(url[b]) || parse.IsNewline(url[b])) {
+ b--
+ }
+ if a == b {
+ url = url[:2]
+ } else {
+ url = url[a-1 : b+2]
+ }
+ url[0] = '"'
+ url[len(url)-1] = '"'
+ } else {
+ url = url[4 : len(url)-1]
+ }
+ values[1].Data = url
+ }
+ for _, val := range values {
+ c.w.Write(val.Data)
+ }
+ semicolonQueued = true
+ case css.BeginAtRuleGrammar:
+ c.w.Write(data)
+ for _, val := range c.p.Values() {
+ c.w.Write(val.Data)
+ }
+ c.w.Write(leftBracketBytes)
+ case css.QualifiedRuleGrammar:
+ c.minifySelectors(data, c.p.Values())
+ c.w.Write(commaBytes)
+ case css.BeginRulesetGrammar:
+ c.minifySelectors(data, c.p.Values())
+ c.w.Write(leftBracketBytes)
+ case css.DeclarationGrammar:
+ c.minifyDeclaration(data, c.p.Values())
+ semicolonQueued = true
+ case css.CustomPropertyGrammar:
+ c.w.Write(data)
+ c.w.Write(colonBytes)
+ value := parse.TrimWhitespace(c.p.Values()[0].Data)
+ if len(c.p.Values()[0].Data) != 0 && len(value) == 0 {
+ value = spaceBytes
+ }
+ c.w.Write(value)
+ semicolonQueued = true
+ case css.CommentGrammar:
+ if len(data) > 5 && data[1] == '*' && data[2] == '!' {
+ c.w.Write(data[:3])
+ comment := parse.TrimWhitespace(parse.ReplaceMultipleWhitespace(data[3 : len(data)-2]))
+ c.w.Write(comment)
+ c.w.Write(data[len(data)-2:])
+ }
+ default:
+ c.w.Write(data)
+ }
+ }
+}
+
+func (c *cssMinifier) minifySelectors(property []byte, values []css.Token) {
+ inAttr := false
+ isClass := false
+ for _, val := range c.p.Values() {
+ if !inAttr {
+ if val.TokenType == css.IdentToken {
+ if !isClass {
+ parse.ToLower(val.Data)
+ }
+ isClass = false
+ } else if val.TokenType == css.DelimToken && val.Data[0] == '.' {
+ isClass = true
+ } else if val.TokenType == css.LeftBracketToken {
+ inAttr = true
+ }
+ } else {
+ if val.TokenType == css.StringToken && len(val.Data) > 2 {
+ s := val.Data[1 : len(val.Data)-1]
+ if css.IsIdent(s) {
+ c.w.Write(s)
+ continue
+ }
+ } else if val.TokenType == css.RightBracketToken {
+ inAttr = false
+ } else if val.TokenType == css.IdentToken && len(val.Data) == 1 && (val.Data[0] == 'i' || val.Data[0] == 'I') {
+ c.w.Write(spaceBytes)
+ }
+ }
+ c.w.Write(val.Data)
+ }
+}
+
+func (c *cssMinifier) parseFunction(values []css.Token) ([]Token, int) {
+ i := 1
+ level := 0
+ args := []Token{}
+ for ; i < len(values); i++ {
+ tt := values[i].TokenType
+ data := values[i].Data
+ if tt == css.LeftParenthesisToken {
+ level++
+ } else if tt == css.RightParenthesisToken {
+ if level == 0 {
+ i++
+ break
+ }
+ level--
+ }
+ if tt == css.FunctionToken {
+ subArgs, di := c.parseFunction(values[i:])
+ h := ToHash(parse.ToLower(parse.Copy(data[:len(data)-1]))) // TODO: use ToHashFold
+ args = append(args, Token{tt, data, subArgs, h, 0})
+ i += di - 1
+ } else {
+ var h Hash
+ if tt == css.IdentToken {
+ h = ToHash(parse.ToLower(parse.Copy(data))) // TODO: use ToHashFold
+ }
+ args = append(args, Token{tt, data, nil, 0, h})
+ }
+ }
+ return args, i
+}
+
+// parseDeclaration flattens a declaration's component values into a []Token
+// suitable for minifyTokens/minifyProperty. It returns nil when the value is
+// too complex to process safely: it contains blocks/brackets/parens at the
+// top level, or two consecutive non-separator tokens (values must alternate
+// with whitespace, commas, or '/' delimiters). Whitespace separators are
+// dropped; comma and '/' separators are kept as tokens.
+func (c *cssMinifier) parseDeclaration(values []css.Token) []Token {
+ // Check if this is a simple list of values separated by whitespace or commas, otherwise we'll not be processing
+ prevSep := true
+ tokens := c.tokenBuffer[:0] // reuse the minifier's scratch buffer
+ for i := 0; i < len(values); i++ {
+ tt := values[i].TokenType
+ data := values[i].Data
+ if tt == css.LeftParenthesisToken || tt == css.LeftBraceToken || tt == css.LeftBracketToken ||
+ tt == css.RightParenthesisToken || tt == css.RightBraceToken || tt == css.RightBracketToken {
+ return nil
+ }
+
+ if !prevSep && tt != css.WhitespaceToken && tt != css.CommaToken && (tt != css.DelimToken || values[i].Data[0] != '/') {
+ return nil
+ }
+
+ if tt == css.WhitespaceToken || tt == css.CommaToken || tt == css.DelimToken && values[i].Data[0] == '/' {
+ if tt != css.WhitespaceToken {
+ tokens = append(tokens, Token{tt, data, nil, 0, 0})
+ }
+ prevSep = true
+ } else if tt == css.FunctionToken {
+ args, di := c.parseFunction(values[i:])
+ h := ToHash(parse.ToLower(parse.Copy(data[:len(data)-1]))) // TODO: use ToHashFold
+ tokens = append(tokens, Token{tt, data, args, h, 0})
+ prevSep = true
+ i += di - 1
+ } else {
+ var h Hash
+ if tt == css.IdentToken {
+ h = ToHash(parse.ToLower(parse.Copy(data))) // TODO: use ToHashFold
+ }
+ tokens = append(tokens, Token{tt, data, nil, 0, h})
+ // URLs act as their own separator (no whitespace required after)
+ prevSep = tt == css.URLToken
+ }
+ }
+ c.tokenBuffer = tokens // update buffer size for memory reuse
+ return tokens
+}
+
+// minifyDeclaration writes a `property:value` declaration. It strips a
+// trailing `!important` (re-appended in shortened form later), parses the
+// component values, and runs token- and property-level minification. Values
+// too complex to parse are written out verbatim, except for the legacy IE
+// `filter: progid:DXImageTransform.Microsoft.Alpha(...)` form which is
+// shortened to `alpha(...)`.
+func (c *cssMinifier) minifyDeclaration(property []byte, components []css.Token) {
+ c.w.Write(property)
+ c.w.Write(colonBytes)
+
+ if len(components) == 0 {
+ return
+ }
+
+ // Strip !important from the component list, this will be added later separately
+ important := false
+ if len(components) > 2 && components[len(components)-2].TokenType == css.DelimToken && components[len(components)-2].Data[0] == '!' && ToHash(components[len(components)-1].Data) == Important {
+ components = components[:len(components)-2]
+ important = true
+ }
+
+ prop := ToHash(property)
+ values := c.parseDeclaration(components)
+
+ // Do not process complex values (eg. containing blocks or is not alternated between whitespace/commas and flat values
+ if values == nil {
+ if prop == Filter && len(components) == 11 {
+ // IE-only alpha filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=N)
+ if bytes.Equal(components[0].Data, []byte("progid")) &&
+ components[1].TokenType == css.ColonToken &&
+ bytes.Equal(components[2].Data, []byte("DXImageTransform")) &&
+ components[3].Data[0] == '.' &&
+ bytes.Equal(components[4].Data, []byte("Microsoft")) &&
+ components[5].Data[0] == '.' &&
+ bytes.Equal(components[6].Data, []byte("Alpha(")) &&
+ bytes.Equal(parse.ToLower(components[7].Data), []byte("opacity")) &&
+ components[8].Data[0] == '=' &&
+ components[10].Data[0] == ')' {
+ components = components[6:]
+ components[0].Data = []byte("alpha(")
+ }
+ }
+
+ for _, component := range components {
+ c.w.Write(component.Data)
+ }
+ if important {
+ c.w.Write(importantBytes)
+ }
+ return
+ }
+
+ values = c.minifyTokens(prop, 0, values)
+ if len(values) > 0 {
+ values = c.minifyProperty(prop, values)
+ }
+ c.writeDeclaration(values, important)
+}
+
+// writeFunction writes a parsed function's argument tokens, recursing into
+// nested function calls. A FunctionToken's Data already ends with '(' so only
+// the closing ')' is appended here after its arguments.
+func (c *cssMinifier) writeFunction(args []Token) {
+ for _, arg := range args {
+ c.w.Write(arg.Data)
+ if arg.TokenType == css.FunctionToken {
+ c.writeFunction(arg.Args)
+ c.w.Write(rightParenBytes)
+ }
+ }
+}
+
+// writeDeclaration writes the minified value tokens, inserting a space only
+// between two adjacent value tokens that are not already separated by a comma
+// or '/' delimiter (URLs and functions need no trailing separator either).
+// When important is true it appends the shortened `!important` suffix.
+func (c *cssMinifier) writeDeclaration(values []Token, important bool) {
+ prevSep := true // true when no space is needed before the next token
+ for _, value := range values {
+ if !prevSep && value.TokenType != css.CommaToken && (value.TokenType != css.DelimToken || value.Data[0] != '/') {
+ c.w.Write(spaceBytes)
+ }
+
+ c.w.Write(value.Data)
+ if value.TokenType == css.FunctionToken {
+ c.writeFunction(value.Args)
+ c.w.Write(rightParenBytes)
+ }
+
+ if value.TokenType == css.CommaToken || value.TokenType == css.DelimToken && value.Data[0] == '/' || value.TokenType == css.FunctionToken || value.TokenType == css.URLToken {
+ prevSep = true
+ } else {
+ prevSep = false
+ }
+ }
+
+ if important {
+ c.w.Write(importantBytes)
+ }
+}
+
+// minifyTokens minifies individual value tokens in place: numbers and
+// percentages are shortened to the configured precision, zero dimensions lose
+// their unit (except for some properties), string newlines are removed, URLs
+// are unquoted/data-URI-minified when possible, and rgb()/rgba()/hsl()/hsla()
+// calls are collapsed to hex colors or `transparent` when fully opaque or
+// fully transparent. prop is the hash of the enclosing property and fun the
+// hash of the enclosing function (0 at top level). Recursion into nested
+// function arguments is capped at depth 100 via c.tokensLevel.
+func (c *cssMinifier) minifyTokens(prop Hash, fun Hash, values []Token) []Token {
+ if 100 < c.tokensLevel+1 {
+ return values
+ }
+ c.tokensLevel++
+
+ for i, value := range values {
+ tt := value.TokenType
+ switch tt {
+ case css.NumberToken:
+ if prop == Z_Index || prop == Counter_Increment || prop == Counter_Reset || prop == Orphans || prop == Widows {
+ break // integers
+ }
+ if c.o.KeepCSS2 {
+ values[i].Data = minify.Decimal(values[i].Data, c.o.Precision) // don't use exponents
+ } else {
+ values[i].Data = minify.Number(values[i].Data, c.o.Precision)
+ }
+ case css.PercentageToken:
+ // minify the numeric part, then restore the '%' sign
+ n := len(values[i].Data) - 1
+ if c.o.KeepCSS2 {
+ values[i].Data = minify.Decimal(values[i].Data[:n], c.o.Precision) // don't use exponents
+ } else {
+ values[i].Data = minify.Number(values[i].Data[:n], c.o.Precision)
+ }
+ values[i].Data = append(values[i].Data, '%')
+ case css.DimensionToken:
+ var dim []byte
+ values[i], dim = c.minifyDimension(values[i])
+ if 1 < len(values[i].Data) && values[i].Data[0] == '0' && optionalZeroDimension[string(dim)] && prop != Flex && fun == 0 {
+ // cut dimension for zero value, TODO: don't hardcode check for Flex and remove the dimension in minifyDimension
+ values[i].Data = values[i].Data[:1]
+ }
+ case css.StringToken:
+ values[i].Data = removeMarkupNewlines(values[i].Data)
+ case css.URLToken:
+ // 10 = len("url('')")+3: anything shorter cannot be shrunk
+ if 10 < len(values[i].Data) {
+ uri := parse.TrimWhitespace(values[i].Data[4 : len(values[i].Data)-1])
+ delim := byte('"')
+ if 1 < len(uri) && (uri[0] == '\'' || uri[0] == '"') {
+ delim = uri[0]
+ uri = removeMarkupNewlines(uri)
+ uri = uri[1 : len(uri)-1]
+ }
+ if 4 < len(uri) && parse.EqualFold(uri[:5], dataSchemeBytes) {
+ uri = minify.DataURI(c.m, uri)
+ }
+ if css.IsURLUnquoted(uri) {
+ values[i].Data = append(append(urlBytes, uri...), ')')
+ } else {
+ values[i].Data = append(append(append(urlBytes, delim), uri...), delim, ')')
+ }
+ }
+ case css.FunctionToken:
+ values[i].Args = c.minifyTokens(prop, values[i].Fun, values[i].Args)
+
+ fun := values[i].Fun
+ args := values[i].Args
+ if fun == Rgb || fun == Rgba || fun == Hsl || fun == Hsla {
+ // parse the (up to 4) numeric channels; alpha percentages are
+ // normalized to [0,1]. Even indices must be numeric, odd indices
+ // separators (comma, space, or '/' before alpha in modern syntax).
+ valid := true
+ vals := []float64{}
+ for i, arg := range args {
+ numeric := arg.TokenType == css.NumberToken || arg.TokenType == css.PercentageToken
+ separator := arg.TokenType == css.CommaToken || i != 5 && arg.TokenType == css.WhitespaceToken || i == 5 && arg.TokenType == css.DelimToken && arg.Data[0] == '/'
+ if i%2 == 0 && !numeric || i%2 == 1 && !separator {
+ valid = false
+ break
+ } else if numeric {
+ var d float64
+ if arg.TokenType == css.PercentageToken {
+ var err error
+ d, err = strconv.ParseFloat(string(arg.Data[:len(arg.Data)-1]), 32) // can overflow
+ if err != nil {
+ valid = false
+ break
+ }
+ d /= 100.0
+ if d < minify.Epsilon {
+ d = 0.0
+ } else if 1.0-minify.Epsilon < d {
+ d = 1.0
+ }
+ } else {
+ var err error
+ d, err = strconv.ParseFloat(string(arg.Data), 32) // can overflow
+ if err != nil {
+ valid = false
+ break
+ }
+ }
+ vals = append(vals, d)
+ }
+ }
+ if !valid {
+ break
+ }
+
+ a := 1.0
+ if len(vals) == 4 {
+ if vals[0] < minify.Epsilon && vals[1] < minify.Epsilon && vals[2] < minify.Epsilon && vals[3] < minify.Epsilon {
+ // all channels zero: fully transparent black
+ values[i] = Token{css.IdentToken, transparentBytes, nil, 0, Transparent}
+ break
+ } else if 1.0-minify.Epsilon < vals[3] {
+ // alpha == 1: drop the alpha argument and its separator,
+ // and shorten rgba(/hsla( to rgb(/hsl(
+ vals = vals[:3]
+ values[i].Args = values[i].Args[:len(values[i].Args)-2]
+ if fun == Rgba || fun == Hsla {
+ values[i].Data = values[i].Data[:len(values[i].Data)-1]
+ values[i].Data[len(values[i].Data)-1] = '('
+ }
+ } else {
+ a = vals[3]
+ }
+ }
+
+ if a == 1.0 && (len(vals) == 3 || len(vals) == 4) { // only minify color if fully opaque
+ if fun == Rgb || fun == Rgba {
+ for j := 0; j < 3; j++ {
+ if args[j*2].TokenType == css.NumberToken {
+ vals[j] /= 255.0
+ if vals[j] < minify.Epsilon {
+ vals[j] = 0.0
+ } else if 1.0-minify.Epsilon < vals[j] {
+ vals[j] = 1.0
+ }
+ }
+ }
+ values[i] = rgbToToken(vals[0], vals[1], vals[2])
+ break
+ } else if fun == Hsl || fun == Hsla && args[0].TokenType == css.NumberToken && args[2].TokenType == css.PercentageToken && args[4].TokenType == css.PercentageToken {
+ // NOTE(review): '&&' binds tighter than '||' above, so the
+ // token-type checks only guard the Hsla branch — confirm
+ // against upstream whether Hsl should be guarded too.
+ vals[0] /= 360.0
+ _, vals[0] = math.Modf(vals[0])
+ if vals[0] < 0.0 {
+ vals[0] = 1.0 + vals[0]
+ }
+ r, g, b := css.HSL2RGB(vals[0], vals[1], vals[2])
+ values[i] = rgbToToken(r, g, b)
+ break
+ }
+ } else if len(vals) == 4 {
+ // translucent: at least shorten the alpha argument
+ args[6] = minifyNumberPercentage(args[6])
+ }
+
+ if 3 <= len(vals) && (fun == Rgb || fun == Rgba) {
+ // 0%, 20%, 40%, 60%, 80% and 100% can be represented exactly as, 51, 102, 153, 204, and 255 respectively
+ removePercentage := true
+ for j := 0; j < 3; j++ {
+ if args[j*2].TokenType != css.PercentageToken || 2.0*minify.Epsilon <= math.Mod(vals[j]+minify.Epsilon, 0.2) {
+ removePercentage = false
+ break
+ }
+ }
+ if removePercentage {
+ for j := 0; j < 3; j++ {
+ args[j*2].TokenType = css.NumberToken
+ if vals[j] < minify.Epsilon {
+ args[j*2].Data = zeroBytes
+ } else if math.Abs(vals[j]-0.2) < minify.Epsilon {
+ args[j*2].Data = []byte("51")
+ } else if math.Abs(vals[j]-0.4) < minify.Epsilon {
+ args[j*2].Data = []byte("102")
+ } else if math.Abs(vals[j]-0.6) < minify.Epsilon {
+ args[j*2].Data = []byte("153")
+ } else if math.Abs(vals[j]-0.8) < minify.Epsilon {
+ args[j*2].Data = []byte("204")
+ } else if math.Abs(vals[j]-1.0) < minify.Epsilon {
+ args[j*2].Data = []byte("255")
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ c.tokensLevel--
+ return values
+}
+
+// minifyProperty applies property-specific shorthand minification to an
+// already token-minified value list: collapsing repeated box values
+// (margin/padding), dropping default keywords (border/outline/background),
+// rewriting font weights, merging background-position/size, compacting
+// unicode-range lists, etc. It mutates and re-slices values in place and
+// returns the (possibly shorter) slice. The interface stays: same tokens in,
+// an equivalent-or-shorter token list out.
+func (c *cssMinifier) minifyProperty(prop Hash, values []Token) []Token {
+ // limit maximum to prevent slow recursions (e.g. for background's append)
+ if 100 < len(values) {
+ return values
+ }
+
+ switch prop {
+ case Font:
+ // font shorthand: [style weight variant]? size [/ line-height]? family
+ if len(values) > 1 { // must contain atleast font-size and font-family
+ // the font-families are separated by commas and are at the end of font
+ // get index for last token before font family names
+ i := len(values) - 1
+ for j, value := range values[2:] {
+ if value.TokenType == css.CommaToken {
+ i = 2 + j - 1 // identifier before first comma is a font-family
+ break
+ }
+ }
+ i--
+
+ // advance i while still at font-families when they contain spaces but no quotes
+ for ; i > 0; i-- { // i cannot be 0, font-family must be prepended by font-size
+ if values[i-1].TokenType == css.DelimToken && values[i-1].Data[0] == '/' {
+ break
+ } else if values[i].TokenType != css.IdentToken && values[i].TokenType != css.StringToken {
+ break
+ } else if h := values[i].Ident; h == Xx_Small || h == X_Small || h == Small || h == Medium || h == Large || h == X_Large || h == Xx_Large || h == Smaller || h == Larger || h == Inherit || h == Initial || h == Unset {
+ // inherit, initial and unset are followed by an IdentToken/StringToken, so must be for font-size
+ break
+ }
+ }
+
+ // font-family minified in place
+ values = append(values[:i+1], c.minifyProperty(Font_Family, values[i+1:])...)
+
+ // fix for IE9, IE10, IE11: font name starting with `-` is not recognized
+ if values[i+1].Data[0] == '-' {
+ v := make([]byte, len(values[i+1].Data)+2)
+ v[0] = '\''
+ copy(v[1:], values[i+1].Data)
+ v[len(v)-1] = '\''
+ values[i+1].Data = v
+ }
+
+ if i > 0 {
+ // line-height
+ if i > 1 && values[i-1].TokenType == css.DelimToken && values[i-1].Data[0] == '/' {
+ if values[i].Ident == Normal {
+ values = append(values[:i-1], values[i+1:]...)
+ }
+ i -= 2
+ }
+
+ // font-size
+ i--
+
+ // remaining prefix tokens: style/weight/variant keywords
+ for ; i > -1; i-- {
+ if values[i].Ident == Normal {
+ values = append(values[:i], values[i+1:]...)
+ } else if values[i].Ident == Bold {
+ values[i].TokenType = css.NumberToken
+ values[i].Data = n700Bytes
+ } else if values[i].TokenType == css.NumberToken && bytes.Equal(values[i].Data, n400Bytes) {
+ values = append(values[:i], values[i+1:]...)
+ }
+ }
+ }
+ }
+ case Font_Family:
+ // unquote family names whose space-separated parts are all identifiers
+ for i, value := range values {
+ if value.TokenType == css.StringToken && 2 < len(value.Data) {
+ unquote := true
+ parse.ToLower(value.Data)
+ s := value.Data[1 : len(value.Data)-1]
+ if 0 < len(s) {
+ for _, split := range bytes.Split(s, spaceBytes) {
+ // if len is zero, it contains two consecutive spaces
+ if len(split) == 0 || !css.IsIdent(split) {
+ unquote = false
+ break
+ }
+ }
+ }
+ if unquote {
+ values[i].Data = s
+ }
+ }
+ }
+ case Font_Weight:
+ // normal -> 400, bold -> 700
+ if values[0].Ident == Normal {
+ values[0].TokenType = css.NumberToken
+ values[0].Data = n400Bytes
+ } else if values[0].Ident == Bold {
+ values[0].TokenType = css.NumberToken
+ values[0].Data = n700Bytes
+ }
+ case Url:
+ // unquote local('...') arguments when safe
+ for i := 0; i < len(values); i++ {
+ if values[i].TokenType == css.FunctionToken && len(values[i].Args) == 1 {
+ fun := values[i].Fun
+ data := values[i].Args[0].Data
+ if fun == Local && (data[0] == '\'' || data[0] == '"') {
+ if css.IsURLUnquoted(data[1 : len(data)-1]) {
+ data = data[1 : len(data)-1]
+ }
+ values[i].Args[0].Data = data
+ }
+ }
+ }
+ case Margin, Padding, Border_Width:
+ // collapse 2/3/4-value box shorthands with equal sides
+ switch len(values) {
+ case 2:
+ if values[0].Equal(values[1]) {
+ values = values[:1]
+ }
+ case 3:
+ if values[0].Equal(values[1]) && values[0].Equal(values[2]) {
+ values = values[:1]
+ } else if values[0].Equal(values[2]) {
+ values = values[:2]
+ }
+ case 4:
+ if values[0].Equal(values[1]) && values[0].Equal(values[2]) && values[0].Equal(values[3]) {
+ values = values[:1]
+ } else if values[0].Equal(values[2]) && values[1].Equal(values[3]) {
+ values = values[:2]
+ } else if values[1].Equal(values[3]) {
+ values = values[:3]
+ }
+ }
+ case Border, Border_Bottom, Border_Left, Border_Right, Border_Top:
+ // drop default keywords; keep at least `none`
+ for i := 0; i < len(values); i++ {
+ if values[i].Ident == None || values[i].Ident == Currentcolor || values[i].Ident == Medium {
+ values = append(values[:i], values[i+1:]...)
+ i--
+ } else {
+ values[i] = minifyColor(values[i])
+ }
+ }
+ if len(values) == 0 {
+ values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
+ }
+ case Outline:
+ for i := 0; i < len(values); i++ {
+ if values[i].Ident == Invert || values[i].Ident == None || values[i].Ident == Medium {
+ values = append(values[:i], values[i+1:]...)
+ i--
+ } else {
+ values[i] = minifyColor(values[i])
+ }
+ }
+ if len(values) == 0 {
+ values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
+ }
+ case Background:
+ start := 0
+ for end := 0; end <= len(values); end++ { // loop over comma-separated lists
+ if end != len(values) && values[end].TokenType != css.CommaToken {
+ continue
+ } else if start == end {
+ start++
+ continue
+ }
+
+ // minify background-size and lowercase all identifiers
+ for i := start; i < end; i++ {
+ if values[i].TokenType == css.DelimToken && values[i].Data[0] == '/' {
+ // background-size consists of either [<length-percentage> | auto | cover | contain] or [<length-percentage> | auto]{2}
+ // we can only minify the latter
+ if i+1 < end && (values[i+1].TokenType == css.NumberToken || values[i+1].IsLengthPercentage() || values[i+1].Ident == Auto) {
+ if i+2 < end && (values[i+2].TokenType == css.NumberToken || values[i+2].IsLengthPercentage() || values[i+2].Ident == Auto) {
+ sizeValues := c.minifyProperty(Background_Size, values[i+1:i+3])
+ if len(sizeValues) == 1 && sizeValues[0].Ident == Auto {
+ // remove background-size if it is '/ auto' after minifying the property
+ values = append(values[:i], values[i+3:]...)
+ end -= 3
+ i--
+ } else {
+ values = append(values[:i+1], append(sizeValues, values[i+3:]...)...)
+ end -= 2 - len(sizeValues)
+ i += len(sizeValues) - 1
+ }
+ } else if values[i+1].Ident == Auto {
+ // remove background-size if it is '/ auto'
+ values = append(values[:i], values[i+2:]...)
+ end -= 2
+ i--
+ }
+ }
+ }
+ }
+
+ // minify all other values
+ iPaddingBox := -1 // position of background-origin that is padding-box
+ for i := start; i < end; i++ {
+ h := values[i].Ident
+ values[i] = minifyColor(values[i])
+ if values[i].TokenType == css.IdentToken {
+ if i+1 < end && values[i+1].TokenType == css.IdentToken && (h == Space || h == Round || h == Repeat || h == No_Repeat) {
+ if h2 := values[i+1].Ident; h2 == Space || h2 == Round || h2 == Repeat || h2 == No_Repeat {
+ repeatValues := c.minifyProperty(Background_Repeat, values[i:i+2])
+ if len(repeatValues) == 1 && repeatValues[0].Ident == Repeat {
+ values = append(values[:i], values[i+2:]...)
+ end -= 2
+ i--
+ } else {
+ values = append(values[:i], append(repeatValues, values[i+2:]...)...)
+ end -= 2 - len(repeatValues)
+ i += len(repeatValues) - 1
+ }
+ continue
+ }
+ } else if h == None || h == Scroll || h == Transparent {
+ // defaults for background-image/attachment/color
+ values = append(values[:i], values[i+1:]...)
+ end--
+ i--
+ continue
+ } else if h == Border_Box || h == Padding_Box {
+ // origin+clip pair padding-box/border-box equals the default
+ if iPaddingBox == -1 && h == Padding_Box { // background-origin
+ iPaddingBox = i
+ } else if iPaddingBox != -1 && h == Border_Box { // background-clip
+ values = append(values[:i], values[i+1:]...)
+ values = append(values[:iPaddingBox], values[iPaddingBox+1:]...)
+ end -= 2
+ i -= 2
+ }
+ continue
+ }
+ } else if values[i].TokenType == css.HashToken && bytes.Equal(values[i].Data, blackBytes) {
+ values = append(values[:i], values[i+1:]...)
+ end--
+ i--
+ continue
+ }
+
+ // further minify background-position and background-size combination
+ if values[i].TokenType == css.NumberToken || values[i].IsLengthPercentage() || h == Left || h == Right || h == Top || h == Bottom || h == Center {
+ j := i + 1
+ for ; j < len(values); j++ {
+ if h := values[j].Ident; h == Left || h == Right || h == Top || h == Bottom || h == Center {
+ continue
+ } else if values[j].TokenType == css.NumberToken || values[j].IsLengthPercentage() {
+ continue
+ }
+ break
+ }
+
+ positionValues := c.minifyProperty(Background_Position, values[i:j])
+ hasSize := j < len(values) && values[j].TokenType == css.DelimToken && values[j].Data[0] == '/'
+ if !hasSize && len(positionValues) == 2 && positionValues[0].IsZero() && positionValues[1].IsZero() {
+ if end-start == 2 {
+ values[i] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
+ values[i+1] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
+ i++
+ } else {
+ values = append(values[:i], values[j:]...)
+ end -= j - i
+ i--
+ }
+ } else {
+ if len(positionValues) == j-i {
+ for k, positionValue := range positionValues {
+ values[i+k] = positionValue
+ }
+ } else {
+ values = append(values[:i], append(positionValues, values[j:]...)...)
+ end -= j - i - len(positionValues)
+ }
+ i += len(positionValues) - 1
+ }
+ }
+ }
+
+ // an empty layer must still emit something: use `0 0`
+ if end-start == 0 {
+ values = append(values[:start], append([]Token{{css.NumberToken, zeroBytes, nil, 0, 0}, {css.NumberToken, zeroBytes, nil, 0, 0}}, values[end:]...)...)
+ end += 2
+ }
+ start = end + 1
+ }
+ case Background_Size:
+ start := 0
+ for end := 0; end <= len(values); end++ { // loop over comma-separated lists
+ if end != len(values) && values[end].TokenType != css.CommaToken {
+ continue
+ } else if start == end {
+ start++
+ continue
+ }
+
+ // `X auto` equals `X`
+ if end-start == 2 && values[start+1].Ident == Auto {
+ values = append(values[:start+1], values[start+2:]...)
+ end--
+ }
+ start = end + 1
+ }
+ case Background_Repeat:
+ start := 0
+ for end := 0; end <= len(values); end++ { // loop over comma-separated lists
+ if end != len(values) && values[end].TokenType != css.CommaToken {
+ continue
+ } else if start == end {
+ start++
+ continue
+ }
+
+ // `x x` -> `x`; `repeat no-repeat` -> `repeat-x`; `no-repeat repeat` -> `repeat-y`
+ if end-start == 2 && values[start].TokenType == css.IdentToken && values[start+1].TokenType == css.IdentToken {
+ if values[start].Ident == values[start+1].Ident {
+ values = append(values[:start+1], values[start+2:]...)
+ end--
+ } else if values[start].Ident == Repeat && values[start+1].Ident == No_Repeat {
+ values[start].Data = repeatXBytes
+ values[start].Ident = Repeat_X
+ values = append(values[:start+1], values[start+2:]...)
+ end--
+ } else if values[start].Ident == No_Repeat && values[start+1].Ident == Repeat {
+ values[start].Data = repeatYBytes
+ values[start].Ident = Repeat_Y
+ values = append(values[:start+1], values[start+2:]...)
+ end--
+ }
+ }
+ start = end + 1
+ }
+ case Background_Position:
+ start := 0
+ for end := 0; end <= len(values); end++ { // loop over comma-separated lists
+ if end != len(values) && values[end].TokenType != css.CommaToken {
+ continue
+ } else if start == end {
+ start++
+ continue
+ }
+
+ // 3- or 4-value syntax: keyword [offset] keyword [offset]
+ if end-start == 3 || end-start == 4 {
+ // remove zero offsets
+ for _, i := range []int{end - start - 1, start + 1} {
+ if 2 < end-start && values[i].IsZero() {
+ values = append(values[:i], values[i+1:]...)
+ end--
+ }
+ }
+
+ j := start + 1 // position of second set of horizontal/vertical values
+ if 2 < end-start && values[start+2].TokenType == css.IdentToken {
+ j = start + 2
+ }
+
+ b := make([]byte, 0, 4)
+ offsets := make([]Token, 2) // [horizontal, vertical] offsets
+ for _, i := range []int{j, start} {
+ if i+1 < end && i+1 != j {
+ if values[i+1].TokenType == css.PercentageToken {
+ // change right or bottom with percentage offset to left or top respectively
+ if values[i].Ident == Right || values[i].Ident == Bottom {
+ n, _ := strconvParse.ParseInt(values[i+1].Data[:len(values[i+1].Data)-1])
+ b = strconv.AppendInt(b[:0], 100-n, 10)
+ b = append(b, '%')
+ values[i+1].Data = b
+ if values[i].Ident == Right {
+ values[i].Data = leftBytes
+ values[i].Ident = Left
+ } else {
+ values[i].Data = topBytes
+ values[i].Ident = Top
+ }
+ }
+ }
+ if values[i].Ident == Left {
+ offsets[0] = values[i+1]
+ } else if values[i].Ident == Top {
+ offsets[1] = values[i+1]
+ }
+ } else if values[i].Ident == Left {
+ offsets[0] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
+ } else if values[i].Ident == Top {
+ offsets[1] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
+ } else if values[i].Ident == Right {
+ offsets[0] = Token{css.PercentageToken, n100pBytes, nil, 0, 0}
+ values[i].Ident = Left
+ } else if values[i].Ident == Bottom {
+ offsets[1] = Token{css.PercentageToken, n100pBytes, nil, 0, 0}
+ values[i].Ident = Top
+ }
+ }
+
+ if values[start].Ident == Center || values[j].Ident == Center {
+ if values[start].Ident == Left || values[j].Ident == Left {
+ offsets = offsets[:1]
+ } else if values[start].Ident == Top || values[j].Ident == Top {
+ offsets[0] = Token{css.NumberToken, n50pBytes, nil, 0, 0}
+ }
+ }
+
+ if offsets[0].Data != nil && (len(offsets) == 1 || offsets[1].Data != nil) {
+ values = append(append(values[:start], offsets...), values[end:]...)
+ end -= end - start - len(offsets)
+ }
+ }
+ // removing zero offsets in the previous loop might make it eligible for the next loop
+ if end-start == 1 || end-start == 2 {
+ if end-start == 1 && (values[start].Ident == Top || values[start].Ident == Bottom) {
+ // we can't make this smaller, and converting to a number will break it
+ // (https://github.com/tdewolff/minify/issues/221#issuecomment-415419918)
+ break
+ }
+
+ if end-start == 2 && (values[start].Ident == Top || values[start].Ident == Bottom || values[start+1].Ident == Left || values[start+1].Ident == Right) {
+ // if it's a vertical position keyword, swap it with the next element
+ // since otherwise converted number positions won't be valid anymore
+ // (https://github.com/tdewolff/minify/issues/221#issue-353067229)
+ values[start], values[start+1] = values[start+1], values[start]
+ }
+
+ // transform keywords to lengths|percentages
+ for i := start; i < end; i++ {
+ if values[i].TokenType == css.IdentToken {
+ if values[i].Ident == Left || values[i].Ident == Top {
+ values[i].TokenType = css.NumberToken
+ values[i].Data = zeroBytes
+ values[i].Ident = 0
+ } else if values[i].Ident == Right || values[i].Ident == Bottom {
+ values[i].TokenType = css.PercentageToken
+ values[i].Data = n100pBytes
+ values[i].Ident = 0
+ } else if values[i].Ident == Center {
+ if i == start {
+ values[i].TokenType = css.PercentageToken
+ values[i].Data = n50pBytes
+ values[i].Ident = 0
+ } else {
+ values = append(values[:start+1], values[start+2:]...)
+ end--
+ }
+ }
+ } else if i == start+1 && values[i].TokenType == css.PercentageToken && bytes.Equal(values[i].Data, n50pBytes) {
+ values = append(values[:start+1], values[start+2:]...)
+ end--
+ } else if values[i].TokenType == css.PercentageToken && values[i].Data[0] == '0' {
+ values[i].TokenType = css.NumberToken
+ values[i].Data = zeroBytes
+ values[i].Ident = 0
+ }
+ }
+ }
+ start = end + 1
+ }
+ case Box_Shadow:
+ start := 0
+ for end := 0; end <= len(values); end++ { // loop over comma-separated lists
+ if end != len(values) && values[end].TokenType != css.CommaToken {
+ continue
+ } else if start == end {
+ start++
+ continue
+ }
+
+ if end-start == 1 && values[start].Ident == Initial {
+ values[start].Ident = None
+ values[start].Data = noneBytes
+ } else {
+ // drop trailing zero spread/blur lengths
+ numbers := []int{}
+ for i := start; i < end; i++ {
+ if values[i].IsLength() {
+ numbers = append(numbers, i)
+ }
+ }
+ if len(numbers) == 4 && values[numbers[3]].IsZero() {
+ values = append(values[:numbers[3]], values[numbers[3]+1:]...)
+ numbers = numbers[:3]
+ end--
+ }
+ if len(numbers) == 3 && values[numbers[2]].IsZero() {
+ values = append(values[:numbers[2]], values[numbers[2]+1:]...)
+ end--
+ }
+ }
+ start = end + 1
+ }
+ case Ms_Filter:
+ // shorten quoted IE alpha filter string
+ alpha := []byte("progid:DXImageTransform.Microsoft.Alpha(Opacity=")
+ if values[0].TokenType == css.StringToken && 2 < len(values[0].Data) && bytes.HasPrefix(values[0].Data[1:len(values[0].Data)-1], alpha) {
+ values[0].Data = append(append([]byte{values[0].Data[0]}, []byte("alpha(opacity=")...), values[0].Data[1+len(alpha):]...)
+ }
+ case Color:
+ values[0] = minifyColor(values[0])
+ case Background_Color:
+ values[0] = minifyColor(values[0])
+ if !c.o.KeepCSS2 {
+ if values[0].Ident == Transparent {
+ values[0].Data = initialBytes
+ values[0].Ident = Initial
+ }
+ }
+ case Border_Color:
+ // collapse to one value when all four sides share the same color
+ sameValues := true
+ for i := range values {
+ if values[i].Ident == Currentcolor {
+ values[i].Data = initialBytes
+ values[i].Ident = Initial
+ } else {
+ values[i] = minifyColor(values[i])
+ }
+ if 0 < i && sameValues && !bytes.Equal(values[0].Data, values[i].Data) {
+ sameValues = false
+ }
+ }
+ if sameValues {
+ values = values[:1]
+ }
+ case Border_Left_Color, Border_Right_Color, Border_Top_Color, Border_Bottom_Color, Text_Decoration_Color, Text_Emphasis_Color:
+ if values[0].Ident == Currentcolor {
+ values[0].Data = initialBytes
+ values[0].Ident = Initial
+ } else {
+ values[0] = minifyColor(values[0])
+ }
+ case Caret_Color, Outline_Color, Fill, Stroke:
+ values[0] = minifyColor(values[0])
+ case Column_Rule:
+ for i := 0; i < len(values); i++ {
+ if values[i].Ident == Currentcolor || values[i].Ident == None || values[i].Ident == Medium {
+ values = append(values[:i], values[i+1:]...)
+ i--
+ } else {
+ values[i] = minifyColor(values[i])
+ }
+ }
+ if len(values) == 0 {
+ values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
+ }
+ case Text_Shadow:
+ // TODO: minify better (can be comma separated list)
+ for i := 0; i < len(values); i++ {
+ values[i] = minifyColor(values[i])
+ }
+ case Text_Decoration:
+ for i := 0; i < len(values); i++ {
+ if values[i].Ident == Currentcolor || values[i].Ident == None || values[i].Ident == Solid {
+ values = append(values[:i], values[i+1:]...)
+ i--
+ } else {
+ values[i] = minifyColor(values[i])
+ }
+ }
+ if len(values) == 0 {
+ values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
+ }
+ case Text_Emphasis:
+ for i := 0; i < len(values); i++ {
+ if values[i].Ident == Currentcolor || values[i].Ident == None {
+ values = append(values[:i], values[i+1:]...)
+ i--
+ } else {
+ values[i] = minifyColor(values[i])
+ }
+ }
+ if len(values) == 0 {
+ values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
+ }
+ case Flex:
+ // flex shorthand: grow [shrink]? [basis]? with keyword equivalents
+ if len(values) == 2 && values[0].TokenType == css.NumberToken {
+ if values[1].TokenType != css.NumberToken && values[1].IsZero() {
+ values = values[:1] // remove <flex-basis> if it is zero
+ }
+ } else if len(values) == 3 && values[0].TokenType == css.NumberToken && values[1].TokenType == css.NumberToken {
+ if len(values[0].Data) == 1 && len(values[1].Data) == 1 {
+ if values[2].Ident == Auto {
+ if values[0].Data[0] == '0' && values[1].Data[0] == '1' {
+ values = values[:1]
+ values[0].TokenType = css.IdentToken
+ values[0].Data = initialBytes
+ values[0].Ident = Initial
+ } else if values[0].Data[0] == '1' && values[1].Data[0] == '1' {
+ values = values[:1]
+ values[0].TokenType = css.IdentToken
+ values[0].Data = autoBytes
+ values[0].Ident = Auto
+ } else if values[0].Data[0] == '0' && values[1].Data[0] == '0' {
+ values = values[:1]
+ values[0].TokenType = css.IdentToken
+ values[0].Data = noneBytes
+ values[0].Ident = None
+ }
+ } else if values[1].Data[0] == '1' && values[2].IsZero() {
+ values = values[:1] // remove <flex-shrink> and <flex-basis> if they are 1 and 0 respectively
+ } else if values[2].IsZero() {
+ values = values[:2] // remove auto to write 2-value syntax of <flex-grow> <flex-shrink>
+ } else {
+ values[2] = minifyLengthPercentage(values[2])
+ }
+ }
+ }
+ case Flex_Basis:
+ if values[0].Ident == Initial {
+ values[0].Data = autoBytes
+ values[0].Ident = Auto
+ } else {
+ values[0] = minifyLengthPercentage(values[0])
+ }
+ case Order, Flex_Grow:
+ if values[0].Ident == Initial {
+ values[0].TokenType = css.NumberToken
+ values[0].Data = zeroBytes
+ values[0].Ident = 0
+ }
+ case Flex_Shrink:
+ if values[0].Ident == Initial {
+ values[0].TokenType = css.NumberToken
+ values[0].Data = oneBytes
+ values[0].Ident = 0
+ }
+ case Unicode_Range:
+ // parse U+XXXX, U+XXXX-YYYY and U+XX?? forms into numeric [start,end]
+ ranges := [][2]int{}
+ for _, value := range values {
+ if value.TokenType == css.CommaToken {
+ continue
+ } else if value.TokenType != css.UnicodeRangeToken {
+ return values
+ }
+
+ i := 2 // skip the "U+" prefix
+ iWildcard := 0
+ start := 0
+ for i < len(value.Data) && value.Data[i] != '-' {
+ start *= 16
+ if '0' <= value.Data[i] && value.Data[i] <= '9' {
+ start += int(value.Data[i] - '0')
+ } else if 'a' <= value.Data[i]|32 && value.Data[i]|32 <= 'f' {
+ start += int(value.Data[i]|32-'a') + 10
+ } else if iWildcard == 0 && value.Data[i] == '?' {
+ iWildcard = i
+ }
+ i++
+ }
+ end := start
+ if iWildcard != 0 {
+ // `?` wildcards expand the end of the range
+ end = start + int(math.Pow(16.0, float64(len(value.Data)-iWildcard))) - 1
+ } else if i < len(value.Data) && value.Data[i] == '-' {
+ i++
+ end = 0
+ for i < len(value.Data) {
+ end *= 16
+ if '0' <= value.Data[i] && value.Data[i] <= '9' {
+ end += int(value.Data[i] - '0')
+ } else if 'a' <= value.Data[i]|32 && value.Data[i]|32 <= 'f' {
+ end += int(value.Data[i]|32-'a') + 10
+ }
+ i++
+ }
+ if end <= start {
+ end = start
+ }
+ }
+ ranges = append(ranges, [2]int{start, end})
+ }
+
+ // sort and remove overlapping ranges
+ sort.Slice(ranges, func(i, j int) bool { return ranges[i][0] < ranges[j][0] })
+ for i := 0; i < len(ranges)-1; i++ {
+ if ranges[i+1][1] <= ranges[i][1] {
+ // next range is fully contained in the current range
+ ranges = append(ranges[:i+1], ranges[i+2:]...)
+ } else if ranges[i+1][0] <= ranges[i][1]+1 {
+ // next range is partially covering the current range
+ ranges[i][1] = ranges[i+1][1]
+ ranges = append(ranges[:i+1], ranges[i+2:]...)
+ }
+ }
+
+ // re-emit the merged ranges in their shortest textual form
+ values = values[:0]
+ for i, ran := range ranges {
+ if i != 0 {
+ values = append(values, Token{css.CommaToken, commaBytes, nil, 0, None})
+ }
+ if ran[0] == ran[1] {
+ urange := []byte(fmt.Sprintf("U+%X", ran[0]))
+ values = append(values, Token{css.UnicodeRangeToken, urange, nil, 0, None})
+ } else if ran[0] == 0 && ran[1] == 0x10FFFF {
+ // full Unicode range equals the initial value
+ values = append(values, Token{css.IdentToken, initialBytes, nil, 0, None})
+ } else {
+ // try to express the range with trailing `?` wildcards
+ k := 0
+ for k < 6 && (ran[0]>>(k*4))&0xF == 0 && (ran[1]>>(k*4))&0xF == 0xF {
+ k++
+ }
+ wildcards := k
+ for k < 6 {
+ if (ran[0]>>(k*4))&0xF != (ran[1]>>(k*4))&0xF {
+ wildcards = 0
+ break
+ }
+ k++
+ }
+ var urange []byte
+ if wildcards != 0 {
+ if ran[0]>>(wildcards*4) == 0 {
+ urange = []byte(fmt.Sprintf("U+%s", strings.Repeat("?", wildcards)))
+ } else {
+ urange = []byte(fmt.Sprintf("U+%X%s", ran[0]>>(wildcards*4), strings.Repeat("?", wildcards)))
+ }
+ } else {
+ urange = []byte(fmt.Sprintf("U+%X-%X", ran[0], ran[1]))
+ }
+ values = append(values, Token{css.UnicodeRangeToken, urange, nil, 0, None})
+ }
+ }
+ }
+ return values
+}
+
+// minifyColor shortens a single color token: named colors with shorter hex
+// forms become hashes, hex colors are lowercased, an `ff` alpha byte is
+// dropped, 6- and 8-digit hex collapses to 3-/4-digit form when each channel
+// repeats, and hex values with shorter keyword names become identifiers.
+func minifyColor(value Token) Token {
+ data := value.Data
+ if value.TokenType == css.IdentToken {
+ if hexValue, ok := ShortenColorName[value.Ident]; ok {
+ value.TokenType = css.HashToken
+ value.Data = hexValue
+ }
+ } else if value.TokenType == css.HashToken {
+ parse.ToLower(data[1:])
+ if len(data) == 9 && data[7] == data[8] {
+ if data[7] == 'f' {
+ // fully opaque: drop the alpha channel
+ data = data[:7]
+ } else if data[7] == '0' {
+ // fully transparent: equivalent to transparent black
+ data = blackBytes
+ }
+ }
+ if ident, ok := ShortenColorHex[string(data)]; ok {
+ value.TokenType = css.IdentToken
+ data = ident
+ } else if len(data) == 7 && data[1] == data[2] && data[3] == data[4] && data[5] == data[6] {
+ // #rrggbb -> #rgb (in-place shuffle)
+ value.TokenType = css.HashToken
+ data[2] = data[3]
+ data[3] = data[5]
+ data = data[:4]
+ } else if len(data) == 9 && data[1] == data[2] && data[3] == data[4] && data[5] == data[6] && data[7] == data[8] {
+ // from working draft Color Module Level 4
+ value.TokenType = css.HashToken
+ data[2] = data[3]
+ data[3] = data[5]
+ data[4] = data[7]
+ data = data[:5]
+ }
+ value.Data = data
+ }
+ return value
+}
+
// minifyNumberPercentage converts between number and percentage notation when
// the other representation is not longer: "N0%" becomes ".N" (40% -> .4), and
// ".0D" / ".00D…" numbers become percentages (.05 -> 5%, .005 -> .5%).
// All rewrites mutate value.Data in place; statement order matters.
func minifyNumberPercentage(value Token) Token {
	// assumes input already minified
	if value.TokenType == css.PercentageToken && len(value.Data) == 3 && value.Data[len(value.Data)-2] == '0' {
		// "N0%" -> ".N": same value, one byte shorter
		value.Data[1] = value.Data[0]
		value.Data[0] = '.'
		value.Data = value.Data[:2]
		value.TokenType = css.NumberToken
	} else if value.TokenType == css.NumberToken && 2 < len(value.Data) && value.Data[0] == '.' && value.Data[1] == '0' {
		if value.Data[2] == '0' {
			// ".00D…" -> shift the remaining digits two places left and append
			// '%': e.g. ".005" becomes ".5%" (one byte shorter)
			value.Data[0] = '.'
			copy(value.Data[1:], value.Data[3:])
			value.Data[len(value.Data)-2] = '%'
			value.Data = value.Data[:len(value.Data)-1]
			value.TokenType = css.PercentageToken
		} else if len(value.Data) == 3 {
			// ".0D" -> "D%": e.g. ".05" becomes "5%" (one byte shorter)
			value.Data[0] = value.Data[2]
			value.Data[1] = '%'
			value.Data = value.Data[:2]
			value.TokenType = css.PercentageToken
		}
	}
	return value
}
+
+func minifyLengthPercentage(value Token) Token {
+ if value.TokenType != css.NumberToken && value.IsZero() {
+ value.TokenType = css.NumberToken
+ value.Data = value.Data[:1] // remove dimension for zero value
+ }
+ return value
+}
+
// minifyDimension lowercases the alphabetic unit of a dimension token and
// minifies its numeric part (decimal-only when KeepCSS2 is set, otherwise
// allowing exponent notation). It returns the updated token and the
// lowercased unit suffix; non-dimension tokens pass through with a nil
// dimension.
func (c *cssMinifier) minifyDimension(value Token) (Token, []byte) {
	// TODO: add check for zero value
	var dim []byte
	if value.TokenType == css.DimensionToken {
		// walk backwards over the trailing A-Z/a-z unit characters, lowercasing
		// uppercase ones in place; n ends at the number/unit boundary
		n := len(value.Data)
		for 0 < n {
			lower := 'a' <= value.Data[n-1] && value.Data[n-1] <= 'z'
			upper := 'A' <= value.Data[n-1] && value.Data[n-1] <= 'Z'
			if !lower && !upper {
				break
			} else if upper {
				value.Data[n-1] = value.Data[n-1] + ('a' - 'A')
			}
			n--
		}

		num := value.Data[:n]
		if c.o.KeepCSS2 {
			num = minify.Decimal(num, c.o.Precision) // don't use exponents
		} else {
			num = minify.Number(num, c.o.Precision)
		}
		dim = value.Data[n:]
		// NOTE(review): num and dim alias the same backing array; this append
		// assumes the minified num is no longer than the original prefix so the
		// leftward copy of dim is safe — confirm against minify.Number/Decimal.
		value.Data = append(num, dim...)
	}
	return value, dim

	// TODO: optimize
	//if value.TokenType == css.DimensionToken {
	// // TODO: reverse; parse dim not number
	// n := parse.Number(value.Data)
	// num := value.Data[:n]
	// dim = value.Data[n:]
	// parse.ToLower(dim)

	// if c.o.KeepCSS2 {
	// num = minify.Decimal(num, c.o.Precision) // don't use exponents
	// } else {
	// num = minify.Number(num, c.o.Precision)
	// }

	// // change dimension to compress number
	// h := ToHash(dim)
	// if h == Px || h == Pt || h == Pc || h == In || h == Mm || h == Cm || h == Q || h == Deg || h == Grad || h == Rad || h == Turn || h == S || h == Ms || h == Hz || h == Khz || h == Dpi || h == Dpcm || h == Dppx {
	// d, _ := strconv.ParseFloat(string(num), 64) // can never fail
	// var dimensions []Hash
	// var multipliers []float64
	// switch h {
	// case Px:
	// //dimensions = []Hash{In, Cm, Pc, Mm, Pt, Q}
	// //multipliers = []float64{0.010416666666666667, 0.026458333333333333, 0.0625, 0.26458333333333333, 0.75, 1.0583333333333333}
	// dimensions = []Hash{In, Pc, Pt}
	// multipliers = []float64{0.010416666666666667, 0.0625, 0.75}
	// case Pt:
	// //dimensions = []Hash{In, Cm, Pc, Mm, Px, Q}
	// //multipliers = []float64{0.013888888888888889, 0.035277777777777778, 0.083333333333333333, 0.35277777777777778, 1.3333333333333333, 1.4111111111111111}
	// dimensions = []Hash{In, Pc, Px}
	// multipliers = []float64{0.013888888888888889, 0.083333333333333333, 1.3333333333333333}
	// case Pc:
	// //dimensions = []Hash{In, Cm, Mm, Pt, Px, Q}
	// //multipliers = []float64{0.16666666666666667, 0.42333333333333333, 4.2333333333333333, 12.0, 16.0, 16.933333333333333}
	// dimensions = []Hash{In, Pt, Px}
	// multipliers = []float64{0.16666666666666667, 12.0, 16.0}
	// case In:
	// //dimensions = []Hash{Cm, Pc, Mm, Pt, Px, Q}
	// //multipliers = []float64{2.54, 6.0, 25.4, 72.0, 96.0, 101.6}
	// dimensions = []Hash{Pc, Pt, Px}
	// multipliers = []float64{6.0, 72.0, 96.0}
	// case Cm:
	// //dimensions = []Hash{In, Pc, Mm, Pt, Px, Q}
	// //multipliers = []float64{0.39370078740157480, 2.3622047244094488, 10.0, 28.346456692913386, 37.795275590551181, 40.0}
	// dimensions = []Hash{Mm, Q}
	// multipliers = []float64{10.0, 40.0}
	// case Mm:
	// //dimensions = []Hash{In, Cm, Pc, Pt, Px, Q}
	// //multipliers = []float64{0.039370078740157480, 0.1, 0.23622047244094488, 2.8346456692913386, 3.7795275590551181, 4.0}
	// dimensions = []Hash{Cm, Q}
	// multipliers = []float64{0.1, 4.0}
	// case Q:
	// //dimensions = []Hash{In, Cm, Pc, Pt, Px} // Q to mm is never smaller
	// //multipliers = []float64{0.0098425196850393701, 0.025, 0.059055118110236220, 0.70866141732283465, 0.94488188976377953}
	// dimensions = []Hash{Cm} // Q to mm is never smaller
	// multipliers = []float64{0.025}
	// case Deg:
	// //dimensions = []Hash{Turn, Rad, Grad}
	// //multipliers = []float64{0.0027777777777777778, 0.017453292519943296, 1.1111111111111111}
	// dimensions = []Hash{Turn, Grad}
	// multipliers = []float64{0.0027777777777777778, 1.1111111111111111}
	// case Grad:
	// //dimensions = []Hash{Turn, Rad, Deg}
	// //multipliers = []float64{0.0025, 0.015707963267948966, 0.9}
	// dimensions = []Hash{Turn, Deg}
	// multipliers = []float64{0.0025, 0.9}
	// case Turn:
	// //dimensions = []Hash{Rad, Deg, Grad}
	// //multipliers = []float64{6.2831853071795865, 360.0, 400.0}
	// dimensions = []Hash{Deg, Grad}
	// multipliers = []float64{360.0, 400.0}
	// case Rad:
	// //dimensions = []Hash{Turn, Deg, Grad}
	// //multipliers = []float64{0.15915494309189534, 57.295779513082321, 63.661977236758134}
	// case S:
	// dimensions = []Hash{Ms}
	// multipliers = []float64{1000.0}
	// case Ms:
	// dimensions = []Hash{S}
	// multipliers = []float64{0.001}
	// case Hz:
	// dimensions = []Hash{Khz}
	// multipliers = []float64{0.001}
	// case Khz:
	// dimensions = []Hash{Hz}
	// multipliers = []float64{1000.0}
	// case Dpi:
	// dimensions = []Hash{Dppx, Dpcm}
	// multipliers = []float64{0.010416666666666667, 0.39370078740157480}
	// case Dpcm:
	// //dimensions = []Hash{Dppx, Dpi}
	// //multipliers = []float64{0.026458333333333333, 2.54}
	// dimensions = []Hash{Dpi}
	// multipliers = []float64{2.54}
	// case Dppx:
	// //dimensions = []Hash{Dpcm, Dpi}
	// //multipliers = []float64{37.795275590551181, 96.0}
	// dimensions = []Hash{Dpi}
	// multipliers = []float64{96.0}
	// }
	// for i := range dimensions {
	// if dimensions[i] != h { //&& (d < 1.0) == (multipliers[i] > 1.0) {
	// b, _ := strconvParse.AppendFloat([]byte{}, d*multipliers[i], -1)
	// if c.o.KeepCSS2 {
	// b = minify.Decimal(b, c.o.newPrecision) // don't use exponents
	// } else {
	// b = minify.Number(b, c.o.newPrecision)
	// }
	// newDim := []byte(dimensions[i].String())
	// if len(b)+len(newDim) < len(num)+len(dim) {
	// num = b
	// dim = newDim
	// }
	// }
	// }
	// }
	// value.Data = append(num, dim...)
	//}
	//return value, dim
}
diff --git a/vendor/github.com/tdewolff/minify/v2/css/hash.go b/vendor/github.com/tdewolff/minify/v2/css/hash.go
new file mode 100644
index 0000000..98692c8
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/css/hash.go
@@ -0,0 +1,1392 @@
// Package css (hash.go): perfect-hash identifiers for CSS property names,
// color keywords, units, and at-rule/function names used by the minifier.
package css

// uses github.com/tdewolff/hasher
//go:generate hasher -type=Hash -file=hash.go
// NOTE: this file is machine-generated by hasher — regenerate it rather than
// editing the constants by hand.

// Hash defines perfect hashes for a predefined list of strings
type Hash uint32

// Identifiers for the hashes associated with the text in the comments.
// The low byte of each value matches the length of the commented string;
// the upper bits presumably encode its offset into the generated string
// table — confirm against the hasher tool before relying on this.
const (
	Ms_Filter                   Hash = 0xa     // -ms-filter
	Accelerator                 Hash = 0x3760b // accelerator
	Aliceblue                   Hash = 0x7a209 // aliceblue
	Align_Content               Hash = 0xd980d // align-content
	Align_Items                 Hash = 0x7ef0b // align-items
	Align_Self                  Hash = 0x8cb0a // align-self
	All                         Hash = 0x69103 // all
	Alpha                       Hash = 0x37205 // alpha
	Animation                   Hash = 0xca09  // animation
	Animation_Delay             Hash = 0x2050f // animation-delay
	Animation_Direction         Hash = 0x8e913 // animation-direction
	Animation_Duration          Hash = 0x35d12 // animation-duration
	Animation_Fill_Mode         Hash = 0x66c13 // animation-fill-mode
	Animation_Iteration_Count   Hash = 0xd4919 // animation-iteration-count
	Animation_Name              Hash = 0xca0e  // animation-name
	Animation_Play_State        Hash = 0xfc14  // animation-play-state
	Animation_Timing_Function   Hash = 0x14119 // animation-timing-function
	Antiquewhite                Hash = 0x6490c // antiquewhite
	Aquamarine                  Hash = 0x9ec0a // aquamarine
	Attr                        Hash = 0x59804 // attr
	Auto                        Hash = 0x44504 // auto
	Azimuth                     Hash = 0x15a07 // azimuth
	Background                  Hash = 0x2b0a  // background
	Background_Attachment       Hash = 0x2b15  // background-attachment
	Background_Clip             Hash = 0xb6e0f // background-clip
	Background_Color            Hash = 0x21710 // background-color
	Background_Image            Hash = 0x5ad10 // background-image
	Background_Origin           Hash = 0x17111 // background-origin
	Background_Position         Hash = 0x18e13 // background-position
	Background_Position_X       Hash = 0x18e15 // background-position-x
	Background_Position_Y       Hash = 0x1a315 // background-position-y
	Background_Repeat           Hash = 0x1b811 // background-repeat
	Background_Size             Hash = 0x1cb0f // background-size
	Behavior                    Hash = 0x1da08 // behavior
	Black                       Hash = 0x1e205 // black
	Blanchedalmond              Hash = 0x1e70e // blanchedalmond
	Blueviolet                  Hash = 0x7a70a // blueviolet
	Bold                        Hash = 0x1fc04 // bold
	Border                      Hash = 0x22706 // border
	Border_Bottom               Hash = 0x2270d // border-bottom
	Border_Bottom_Color         Hash = 0x22713 // border-bottom-color
	Border_Bottom_Style         Hash = 0x23a13 // border-bottom-style
	Border_Bottom_Width         Hash = 0x25d13 // border-bottom-width
	Border_Box                  Hash = 0x27e0a // border-box
	Border_Collapse             Hash = 0x2b60f // border-collapse
	Border_Color                Hash = 0x2d30c // border-color
	Border_Left                 Hash = 0x2df0b // border-left
	Border_Left_Color           Hash = 0x2df11 // border-left-color
	Border_Left_Style           Hash = 0x2f011 // border-left-style
	Border_Left_Width           Hash = 0x30111 // border-left-width
	Border_Right                Hash = 0x3120c // border-right
	Border_Right_Color          Hash = 0x31212 // border-right-color
	Border_Right_Style          Hash = 0x32412 // border-right-style
	Border_Right_Width          Hash = 0x33612 // border-right-width
	Border_Spacing              Hash = 0x3480e // border-spacing
	Border_Style                Hash = 0x3ab0c // border-style
	Border_Top                  Hash = 0x3b70a // border-top
	Border_Top_Color            Hash = 0x3b710 // border-top-color
	Border_Top_Style            Hash = 0x3c710 // border-top-style
	Border_Top_Width            Hash = 0x3d710 // border-top-width
	Border_Width                Hash = 0x3e70c // border-width
	Bottom                      Hash = 0x22e06 // bottom
	Box_Shadow                  Hash = 0x2850a // box-shadow
	Burlywood                   Hash = 0x3f309 // burlywood
	Cadetblue                   Hash = 0x9c609 // cadetblue
	Calc                        Hash = 0x9c304 // calc
	Caption_Side                Hash = 0x40f0c // caption-side
	Caret_Color                 Hash = 0x4240b // caret-color
	Center                      Hash = 0xdb06  // center
	Charset                     Hash = 0x62f07 // charset
	Chartreuse                  Hash = 0x42f0a // chartreuse
	Chocolate                   Hash = 0x43909 // chocolate
	Clamp                       Hash = 0x44e05 // clamp
	Clear                       Hash = 0x45d05 // clear
	Clip                        Hash = 0xb7904 // clip
	Cm                          Hash = 0x53802 // cm
	Color                       Hash = 0x2505  // color
	Column_Count                Hash = 0x4620c // column-count
	Column_Gap                  Hash = 0x6a30a // column-gap
	Column_Rule                 Hash = 0x4880b // column-rule
	Column_Rule_Color           Hash = 0x48811 // column-rule-color
	Column_Rule_Style           Hash = 0x49911 // column-rule-style
	Column_Rule_Width           Hash = 0x4aa11 // column-rule-width
	Column_Width                Hash = 0x4bb0c // column-width
	Columns                     Hash = 0x74607 // columns
	Content                     Hash = 0x5607  // content
	Cornflowerblue              Hash = 0x4c70e // cornflowerblue
	Cornsilk                    Hash = 0x4d508 // cornsilk
	Counter_Increment           Hash = 0xd5d11 // counter-increment
	Counter_Reset               Hash = 0x4690d // counter-reset
	Cue                         Hash = 0x4dd03 // cue
	Cue_After                   Hash = 0x4dd09 // cue-after
	Cue_Before                  Hash = 0x4e60a // cue-before
	Currentcolor                Hash = 0x5010c // currentcolor
	Cursive                     Hash = 0x50d07 // cursive
	Cursor                      Hash = 0x51406 // cursor
	Darkblue                    Hash = 0x1f408 // darkblue
	Darkcyan                    Hash = 0x1ff08 // darkcyan
	Darkgoldenrod               Hash = 0x3fb0d // darkgoldenrod
	Darkgray                    Hash = 0x40708 // darkgray
	Darkgreen                   Hash = 0x75c09 // darkgreen
	Darkkhaki                   Hash = 0xa1409 // darkkhaki
	Darkmagenta                 Hash = 0xce90b // darkmagenta
	Darkolivegreen              Hash = 0x6d90e // darkolivegreen
	Darkorange                  Hash = 0x7500a // darkorange
	Darkorchid                  Hash = 0xa0b0a // darkorchid
	Darksalmon                  Hash = 0xa990a // darksalmon
	Darkseagreen                Hash = 0xb110c // darkseagreen
	Darkslateblue               Hash = 0xc1c0d // darkslateblue
	Darkslategray               Hash = 0xbfa0d // darkslategray
	Darkturquoise               Hash = 0xcaa0d // darkturquoise
	Darkviolet                  Hash = 0x51a0a // darkviolet
	Deeppink                    Hash = 0x67d08 // deeppink
	Deepskyblue                 Hash = 0x4190b // deepskyblue
	Default                     Hash = 0xa2207 // default
	Deg                         Hash = 0x70103 // deg
	Direction                   Hash = 0x8d909 // direction
	Display                     Hash = 0xcce07 // display
	Document                    Hash = 0x52408 // document
	Dodgerblue                  Hash = 0x52c0a // dodgerblue
	Dpcm                        Hash = 0x53604 // dpcm
	Dpi                         Hash = 0x54f03 // dpi
	Dppx                        Hash = 0x55b04 // dppx
	Elevation                   Hash = 0x6d09  // elevation
	Empty_Cells                 Hash = 0x3910b // empty-cells
	Env                         Hash = 0x4f503 // env
	Fantasy                     Hash = 0x3a407 // fantasy
	Fill                        Hash = 0x67604 // fill
	Filter                      Hash = 0x406   // filter
	Firebrick                   Hash = 0x83509 // firebrick
	Flex                        Hash = 0x55f04 // flex
	Flex_Basis                  Hash = 0x89d0a // flex-basis
	Flex_Direction              Hash = 0x8d40e // flex-direction
	Flex_Flow                   Hash = 0xc8709 // flex-flow
	Flex_Grow                   Hash = 0x55f09 // flex-grow
	Flex_Shrink                 Hash = 0x5680b // flex-shrink
	Flex_Wrap                   Hash = 0x57309 // flex-wrap
	Float                       Hash = 0x59505 // float
	Floralwhite                 Hash = 0x5bd0b // floralwhite
	Font                        Hash = 0x25404 // font
	Font_Face                   Hash = 0x25409 // font-face
	Font_Family                 Hash = 0x5ee0b // font-family
	Font_Size                   Hash = 0x5f909 // font-size
	Font_Size_Adjust            Hash = 0x5f910 // font-size-adjust
	Font_Stretch                Hash = 0x6250c // font-stretch
	Font_Style                  Hash = 0x6360a // font-style
	Font_Variant                Hash = 0x6400c // font-variant
	Font_Weight                 Hash = 0x65b0b // font-weight
	Forestgreen                 Hash = 0x4ec0b // forestgreen
	Fuchsia                     Hash = 0x66607 // fuchsia
	Function                    Hash = 0x15208 // function
	Gainsboro                   Hash = 0xec09  // gainsboro
	Ghostwhite                  Hash = 0x2990a // ghostwhite
	Goldenrod                   Hash = 0x3ff09 // goldenrod
	Grad                        Hash = 0x1004  // grad
	Greenyellow                 Hash = 0x7600b // greenyellow
	Grid                        Hash = 0x35504 // grid
	Grid_Area                   Hash = 0x35509 // grid-area
	Grid_Auto_Columns           Hash = 0x7bb11 // grid-auto-columns
	Grid_Auto_Flow              Hash = 0x81c0e // grid-auto-flow
	Grid_Auto_Rows              Hash = 0x8640e // grid-auto-rows
	Grid_Column                 Hash = 0x69e0b // grid-column
	Grid_Column_End             Hash = 0xcdb0f // grid-column-end
	Grid_Column_Gap             Hash = 0x69e0f // grid-column-gap
	Grid_Column_Start           Hash = 0x6bd11 // grid-column-start
	Grid_Row                    Hash = 0x6ce08 // grid-row
	Grid_Row_End                Hash = 0x6ce0c // grid-row-end
	Grid_Row_Gap                Hash = 0x6e70c // grid-row-gap
	Grid_Row_Start              Hash = 0x7030e // grid-row-start
	Grid_Template               Hash = 0x7110d // grid-template
	Grid_Template_Areas         Hash = 0x71113 // grid-template-areas
	Grid_Template_Columns       Hash = 0x73815 // grid-template-columns
	Grid_Template_Rows          Hash = 0x77012 // grid-template-rows
	Height                      Hash = 0x9306  // height
	Honeydew                    Hash = 0x16008 // honeydew
	Hsl                         Hash = 0x26f03 // hsl
	Hsla                        Hash = 0x26f04 // hsla
	Hz                          Hash = 0x68502 // hz
	Ime_Mode                    Hash = 0xa1c08 // ime-mode
	Import                      Hash = 0x78d06 // import
	Important                   Hash = 0x78d09 // important
	In                          Hash = 0x4402  // in
	Include_Source              Hash = 0x1800e // include-source
	Indianred                   Hash = 0xb0909 // indianred
	Inherit                     Hash = 0x79607 // inherit
	Initial                     Hash = 0x79d07 // initial
	Invert                      Hash = 0x7e406 // invert
	Justify_Content             Hash = 0x4e0f  // justify-content
	Justify_Items               Hash = 0x6050d // justify-items
	Justify_Self                Hash = 0x82a0c // justify-self
	Keyframes                   Hash = 0x5cb09 // keyframes
	Khz                         Hash = 0x68403 // khz
	Large                       Hash = 0xa905  // large
	Larger                      Hash = 0xa906  // larger
	Lavender                    Hash = 0x27108 // lavender
	Lavenderblush               Hash = 0x2710d // lavenderblush
	Lawngreen                   Hash = 0x2ca09 // lawngreen
	Layer_Background_Color      Hash = 0x21116 // layer-background-color
	Layer_Background_Image      Hash = 0x5a716 // layer-background-image
	Layout_Flow                 Hash = 0xcf80b // layout-flow
	Layout_Grid                 Hash = 0x8050b // layout-grid
	Layout_Grid_Char            Hash = 0x80510 // layout-grid-char
	Layout_Grid_Char_Spacing    Hash = 0x80518 // layout-grid-char-spacing
	Layout_Grid_Line            Hash = 0x83e10 // layout-grid-line
	Layout_Grid_Mode            Hash = 0x85410 // layout-grid-mode
	Layout_Grid_Type            Hash = 0x88710 // layout-grid-type
	Left                        Hash = 0x2e604 // left
	Lemonchiffon                Hash = 0x24b0c // lemonchiffon
	Letter_Spacing              Hash = 0x7ae0e // letter-spacing
	Lightblue                   Hash = 0x8ba09 // lightblue
	Lightcoral                  Hash = 0x8c30a // lightcoral
	Lightcyan                   Hash = 0x8e209 // lightcyan
	Lightgoldenrodyellow        Hash = 0x8fc14 // lightgoldenrodyellow
	Lightgray                   Hash = 0x91009 // lightgray
	Lightgreen                  Hash = 0x9190a // lightgreen
	Lightpink                   Hash = 0x92309 // lightpink
	Lightsalmon                 Hash = 0x92c0b // lightsalmon
	Lightseagreen               Hash = 0x9370d // lightseagreen
	Lightskyblue                Hash = 0x9440c // lightskyblue
	Lightslateblue              Hash = 0x9500e // lightslateblue
	Lightsteelblue              Hash = 0x95e0e // lightsteelblue
	Lightyellow                 Hash = 0x96c0b // lightyellow
	Limegreen                   Hash = 0x97709 // limegreen
	Line_Break                  Hash = 0x84a0a // line-break
	Line_Height                 Hash = 0x8e0b  // line-height
	Linear_Gradient             Hash = 0x9800f // linear-gradient
	List_Style                  Hash = 0x98f0a // list-style
	List_Style_Image            Hash = 0x98f10 // list-style-image
	List_Style_Position         Hash = 0x99f13 // list-style-position
	List_Style_Type             Hash = 0x9b20f // list-style-type
	Local                       Hash = 0x9c105 // local
	Magenta                     Hash = 0xced07 // magenta
	Margin                      Hash = 0x53906 // margin
	Margin_Bottom               Hash = 0xdb10d // margin-bottom
	Margin_Left                 Hash = 0xdbd0b // margin-left
	Margin_Right                Hash = 0xb890c // margin-right
	Margin_Top                  Hash = 0x5390a // margin-top
	Marker_Offset               Hash = 0xad00d // marker-offset
	Marks                       Hash = 0xaee05 // marks
	Mask                        Hash = 0x9cf04 // mask
	Max                         Hash = 0x9d303 // max
	Max_Height                  Hash = 0x9d30a // max-height
	Max_Width                   Hash = 0x9dd09 // max-width
	Media                       Hash = 0xd4505 // media
	Medium                      Hash = 0x9e606 // medium
	Mediumaquamarine            Hash = 0x9e610 // mediumaquamarine
	Mediumblue                  Hash = 0x9f60a // mediumblue
	Mediumorchid                Hash = 0xa000c // mediumorchid
	Mediumpurple                Hash = 0xa420c // mediumpurple
	Mediumseagreen              Hash = 0xa4e0e // mediumseagreen
	Mediumslateblue             Hash = 0xa5c0f // mediumslateblue
	Mediumspringgreen           Hash = 0xa6b11 // mediumspringgreen
	Mediumturquoise             Hash = 0xa7c0f // mediumturquoise
	Mediumvioletred             Hash = 0xa8b0f // mediumvioletred
	Midnightblue                Hash = 0xaa90c // midnightblue
	Min                         Hash = 0x14d03 // min
	Min_Height                  Hash = 0xab50a // min-height
	Min_Width                   Hash = 0xabf09 // min-width
	Mintcream                   Hash = 0xac809 // mintcream
	Mistyrose                   Hash = 0xae409 // mistyrose
	Mm                          Hash = 0xaed02 // mm
	Moccasin                    Hash = 0xb0308 // moccasin
	Monospace                   Hash = 0xaa009 // monospace
	Ms                          Hash = 0x102   // ms
	Namespace                   Hash = 0xd409  // namespace
	Navajowhite                 Hash = 0x750b  // navajowhite
	No_Repeat                   Hash = 0xbf09  // no-repeat
	None                        Hash = 0x38e04 // none
	Normal                      Hash = 0x36e06 // normal
	Offset                      Hash = 0xad706 // offset
	Offset_Anchor               Hash = 0xad70d // offset-anchor
	Offset_Distance             Hash = 0xb1d0f // offset-distance
	Offset_Path                 Hash = 0xb2c0b // offset-path
	Offset_Position             Hash = 0xb370f // offset-position
	Offset_Rotate               Hash = 0xb460d // offset-rotate
	Olivedrab                   Hash = 0xb6609 // olivedrab
	Orangered                   Hash = 0x75409 // orangered
	Order                       Hash = 0x22805 // order
	Orphans                     Hash = 0x37f07 // orphans
	Outline                     Hash = 0xba707 // outline
	Outline_Color               Hash = 0xba70d // outline-color
	Outline_Style               Hash = 0xbb40d // outline-style
	Outline_Width               Hash = 0xbc10d // outline-width
	Overflow                    Hash = 0x9d08  // overflow
	Overflow_X                  Hash = 0x9d0a  // overflow-x
	Overflow_Y                  Hash = 0xbce0a // overflow-y
	Padding                     Hash = 0x45207 // padding
	Padding_Bottom              Hash = 0xb7c0e // padding-bottom
	Padding_Box                 Hash = 0x4520b // padding-box
	Padding_Left                Hash = 0xd0a0c // padding-left
	Padding_Right               Hash = 0x5420d // padding-right
	Padding_Top                 Hash = 0x57b0b // padding-top
	Page                        Hash = 0x58504 // page
	Page_Break_After            Hash = 0x58510 // page-break-after
	Page_Break_Before           Hash = 0x6ac11 // page-break-before
	Page_Break_Inside           Hash = 0x6f211 // page-break-inside
	Palegoldenrod               Hash = 0xc100d // palegoldenrod
	Palegreen                   Hash = 0xbd809 // palegreen
	Paleturquoise               Hash = 0xbe10d // paleturquoise
	Palevioletred               Hash = 0xbee0d // palevioletred
	Papayawhip                  Hash = 0xc070a // papayawhip
	Pause                       Hash = 0xc2905 // pause
	Pause_After                 Hash = 0xc290b // pause-after
	Pause_Before                Hash = 0xc340c // pause-before
	Pc                          Hash = 0x53702 // pc
	Peachpuff                   Hash = 0x89509 // peachpuff
	Pitch                       Hash = 0x55005 // pitch
	Pitch_Range                 Hash = 0x5500b // pitch-range
	Place_Content               Hash = 0xc400d // place-content
	Place_Items                 Hash = 0xc4d0b // place-items
	Place_Self                  Hash = 0xc7e0a // place-self
	Play_During                 Hash = 0xcd10b // play-during
	Position                    Hash = 0x13908 // position
	Powderblue                  Hash = 0xc9b0a // powderblue
	Progid                      Hash = 0xca506 // progid
	Pt                          Hash = 0x39302 // pt
	Px                          Hash = 0x55d02 // px
	Q                           Hash = 0x64d01 // q
	Quotes                      Hash = 0xcb706 // quotes
	Rad                         Hash = 0x903   // rad
	Radial_Gradient             Hash = 0x90f   // radial-gradient
	Repeat                      Hash = 0xc206  // repeat
	Repeat_X                    Hash = 0x1c308 // repeat-x
	Repeat_Y                    Hash = 0xc208  // repeat-y
	Rgb                         Hash = 0x2903  // rgb
	Rgba                        Hash = 0x2904  // rgba
	Richness                    Hash = 0xae08  // richness
	Right                       Hash = 0x31905 // right
	Rosybrown                   Hash = 0xf309  // rosybrown
	Round                       Hash = 0x3005  // round
	Row_Gap                     Hash = 0x6ec07 // row-gap
	Royalblue                   Hash = 0x69509 // royalblue
	Ruby_Align                  Hash = 0xd930a // ruby-align
	Ruby_Overhang               Hash = 0xe00d  // ruby-overhang
	Ruby_Position               Hash = 0x1340d // ruby-position
	S                           Hash = 0x201   // s
	Saddlebrown                 Hash = 0xb50b  // saddlebrown
	Sandybrown                  Hash = 0x3850a // sandybrown
	Sans_Serif                  Hash = 0x39b0a // sans-serif
	Scroll                      Hash = 0x12006 // scroll
	Scrollbar_3d_Light_Color    Hash = 0xd7c18 // scrollbar-3d-light-color
	Scrollbar_Arrow_Color       Hash = 0x12015 // scrollbar-arrow-color
	Scrollbar_Base_Color        Hash = 0x8a614 // scrollbar-base-color
	Scrollbar_Dark_Shadow_Color Hash = 0x5d31b // scrollbar-dark-shadow-color
	Scrollbar_Face_Color        Hash = 0x61114 // scrollbar-face-color
	Scrollbar_Highlight_Color   Hash = 0x7cb19 // scrollbar-highlight-color
	Scrollbar_Shadow_Color      Hash = 0x87116 // scrollbar-shadow-color
	Scrollbar_Track_Color       Hash = 0x72315 // scrollbar-track-color
	Seagreen                    Hash = 0x93c08 // seagreen
	Seashell                    Hash = 0x2c308 // seashell
	Serif                       Hash = 0x3a005 // serif
	Size                        Hash = 0x1d604 // size
	Slateblue                   Hash = 0x95509 // slateblue
	Slategray                   Hash = 0xbfe09 // slategray
	Small                       Hash = 0x68f05 // small
	Smaller                     Hash = 0x68f07 // smaller
	Solid                       Hash = 0x74c05 // solid
	Space                       Hash = 0x6905  // space
	Speak                       Hash = 0x78105 // speak
	Speak_Header                Hash = 0x7810c // speak-header
	Speak_Numeral               Hash = 0x7f90d // speak-numeral
	Speak_Punctuation           Hash = 0xaf211 // speak-punctuation
	Speech_Rate                 Hash = 0xc570b // speech-rate
	Springgreen                 Hash = 0xa710b // springgreen
	Steelblue                   Hash = 0x96309 // steelblue
	Stress                      Hash = 0x11b06 // stress
	Stroke                      Hash = 0xc7806 // stroke
	Supports                    Hash = 0xcbc08 // supports
	Table_Layout                Hash = 0xcf20c // table-layout
	Text_Align                  Hash = 0x10e0a // text-align
	Text_Align_Last             Hash = 0x10e0f // text-align-last
	Text_Autospace              Hash = 0x4400e // text-autospace
	Text_Decoration             Hash = 0x7e0f  // text-decoration
	Text_Decoration_Color       Hash = 0x2a115 // text-decoration-color
	Text_Decoration_Line        Hash = 0x7e14  // text-decoration-line
	Text_Decoration_Style       Hash = 0xb5115 // text-decoration-style
	Text_Decoration_Thickness   Hash = 0xc6019 // text-decoration-thickness
	Text_Emphasis               Hash = 0x170d  // text-emphasis
	Text_Emphasis_Color         Hash = 0x1713  // text-emphasis-color
	Text_Indent                 Hash = 0x3f0b  // text-indent
	Text_Justify                Hash = 0x490c  // text-justify
	Text_Kashida_Space          Hash = 0x5c12  // text-kashida-space
	Text_Overflow               Hash = 0x980d  // text-overflow
	Text_Shadow                 Hash = 0xd6d0b // text-shadow
	Text_Transform              Hash = 0xda40e // text-transform
	Text_Underline_Position     Hash = 0xdc717 // text-underline-position
	Top                         Hash = 0x3be03 // top
	Transition                  Hash = 0x4750a // transition
	Transition_Delay            Hash = 0x59a10 // transition-delay
	Transition_Duration         Hash = 0xb9413 // transition-duration
	Transition_Property         Hash = 0x47513 // transition-property
	Transition_Timing_Function  Hash = 0xa281a // transition-timing-function
	Transparent                 Hash = 0xd150b // transparent
	Turn                        Hash = 0xd1f04 // turn
	Turquoise                   Hash = 0xa8209 // turquoise
	Unicode_Bidi                Hash = 0xcc40c // unicode-bidi
	Unicode_Range               Hash = 0xd230d // unicode-range
	Unset                       Hash = 0xd3005 // unset
	Url                         Hash = 0x3f403 // url
	Var                         Hash = 0x64503 // var
	Vertical_Align              Hash = 0x7e60e // vertical-align
	Visibility                  Hash = 0x4f70a // visibility
	Voice_Family                Hash = 0xd350c // voice-family
	Volume                      Hash = 0xd4106 // volume
	White                       Hash = 0x7b05  // white
	White_Space                 Hash = 0x6500b // white-space
	Whitesmoke                  Hash = 0x5c30a // whitesmoke
	Widows                      Hash = 0xd7706 // widows
	Width                       Hash = 0x26b05 // width
	Word_Break                  Hash = 0x1670a // word-break
	Word_Spacing                Hash = 0x28e0c // word-spacing
	Word_Wrap                   Hash = 0xd0209 // word-wrap
	Writing_Mode                Hash = 0xc8f0c // writing-mode
	X_Large                     Hash = 0xa707  // x-large
	X_Small                     Hash = 0x68d07 // x-small
	Xx_Large                    Hash = 0xa608  // xx-large
	Xx_Small                    Hash = 0x68c08 // xx-small
	Yellow                      Hash = 0x76506 // yellow
	Yellowgreen                 Hash = 0x7650b // yellowgreen
	Z_Index                     Hash = 0x68607 // z-index
)
+
+//var HashMap = map[string]Hash{
+// "-ms-filter": Ms_Filter,
+// "accelerator": Accelerator,
+// "aliceblue": Aliceblue,
+// "align-content": Align_Content,
+// "align-items": Align_Items,
+// "align-self": Align_Self,
+// "all": All,
+// "alpha": Alpha,
+// "animation": Animation,
+// "animation-delay": Animation_Delay,
+// "animation-direction": Animation_Direction,
+// "animation-duration": Animation_Duration,
+// "animation-fill-mode": Animation_Fill_Mode,
+// "animation-iteration-count": Animation_Iteration_Count,
+// "animation-name": Animation_Name,
+// "animation-play-state": Animation_Play_State,
+// "animation-timing-function": Animation_Timing_Function,
+// "antiquewhite": Antiquewhite,
+// "aquamarine": Aquamarine,
+// "attr": Attr,
+// "auto": Auto,
+// "azimuth": Azimuth,
+// "background": Background,
+// "background-attachment": Background_Attachment,
+// "background-clip": Background_Clip,
+// "background-color": Background_Color,
+// "background-image": Background_Image,
+// "background-origin": Background_Origin,
+// "background-position": Background_Position,
+// "background-position-x": Background_Position_X,
+// "background-position-y": Background_Position_Y,
+// "background-repeat": Background_Repeat,
+// "background-size": Background_Size,
+// "behavior": Behavior,
+// "black": Black,
+// "blanchedalmond": Blanchedalmond,
+// "blueviolet": Blueviolet,
+// "bold": Bold,
+// "border": Border,
+// "border-bottom": Border_Bottom,
+// "border-bottom-color": Border_Bottom_Color,
+// "border-bottom-style": Border_Bottom_Style,
+// "border-bottom-width": Border_Bottom_Width,
+// "border-box": Border_Box,
+// "border-collapse": Border_Collapse,
+// "border-color": Border_Color,
+// "border-left": Border_Left,
+// "border-left-color": Border_Left_Color,
+// "border-left-style": Border_Left_Style,
+// "border-left-width": Border_Left_Width,
+// "border-right": Border_Right,
+// "border-right-color": Border_Right_Color,
+// "border-right-style": Border_Right_Style,
+// "border-right-width": Border_Right_Width,
+// "border-spacing": Border_Spacing,
+// "border-style": Border_Style,
+// "border-top": Border_Top,
+// "border-top-color": Border_Top_Color,
+// "border-top-style": Border_Top_Style,
+// "border-top-width": Border_Top_Width,
+// "border-width": Border_Width,
+// "bottom": Bottom,
+// "box-shadow": Box_Shadow,
+// "burlywood": Burlywood,
+// "cadetblue": Cadetblue,
+// "calc": Calc,
+// "caption-side": Caption_Side,
+// "caret-color": Caret_Color,
+// "center": Center,
+// "charset": Charset,
+// "chartreuse": Chartreuse,
+// "chocolate": Chocolate,
+// "clamp": Clamp,
+// "clear": Clear,
+// "clip": Clip,
+// "cm": Cm,
+// "color": Color,
+// "column-count": Column_Count,
+// "column-gap": Column_Gap,
+// "column-rule": Column_Rule,
+// "column-rule-color": Column_Rule_Color,
+// "column-rule-style": Column_Rule_Style,
+// "column-rule-width": Column_Rule_Width,
+// "column-width": Column_Width,
+// "columns": Columns,
+// "content": Content,
+// "cornflowerblue": Cornflowerblue,
+// "cornsilk": Cornsilk,
+// "counter-increment": Counter_Increment,
+// "counter-reset": Counter_Reset,
+// "cue": Cue,
+// "cue-after": Cue_After,
+// "cue-before": Cue_Before,
+// "currentcolor": Currentcolor,
+// "cursive": Cursive,
+// "cursor": Cursor,
+// "darkblue": Darkblue,
+// "darkcyan": Darkcyan,
+// "darkgoldenrod": Darkgoldenrod,
+// "darkgray": Darkgray,
+// "darkgreen": Darkgreen,
+// "darkkhaki": Darkkhaki,
+// "darkmagenta": Darkmagenta,
+// "darkolivegreen": Darkolivegreen,
+// "darkorange": Darkorange,
+// "darkorchid": Darkorchid,
+// "darksalmon": Darksalmon,
+// "darkseagreen": Darkseagreen,
+// "darkslateblue": Darkslateblue,
+// "darkslategray": Darkslategray,
+// "darkturquoise": Darkturquoise,
+// "darkviolet": Darkviolet,
+// "deeppink": Deeppink,
+// "deepskyblue": Deepskyblue,
+// "default": Default,
+// "deg": Deg,
+// "direction": Direction,
+// "display": Display,
+// "document": Document,
+// "dodgerblue": Dodgerblue,
+// "dpcm": Dpcm,
+// "dpi": Dpi,
+// "dppx": Dppx,
+// "elevation": Elevation,
+// "empty-cells": Empty_Cells,
+// "env": Env,
+// "fantasy": Fantasy,
+// "fill": Fill,
+// "filter": Filter,
+// "firebrick": Firebrick,
+// "flex": Flex,
+// "flex-basis": Flex_Basis,
+// "flex-direction": Flex_Direction,
+// "flex-flow": Flex_Flow,
+// "flex-grow": Flex_Grow,
+// "flex-shrink": Flex_Shrink,
+// "flex-wrap": Flex_Wrap,
+// "float": Float,
+// "floralwhite": Floralwhite,
+// "font": Font,
+// "font-face": Font_Face,
+// "font-family": Font_Family,
+// "font-size": Font_Size,
+// "font-size-adjust": Font_Size_Adjust,
+// "font-stretch": Font_Stretch,
+// "font-style": Font_Style,
+// "font-variant": Font_Variant,
+// "font-weight": Font_Weight,
+// "forestgreen": Forestgreen,
+// "fuchsia": Fuchsia,
+// "function": Function,
+// "gainsboro": Gainsboro,
+// "ghostwhite": Ghostwhite,
+// "goldenrod": Goldenrod,
+// "grad": Grad,
+// "greenyellow": Greenyellow,
+// "grid": Grid,
+// "grid-area": Grid_Area,
+// "grid-auto-columns": Grid_Auto_Columns,
+// "grid-auto-flow": Grid_Auto_Flow,
+// "grid-auto-rows": Grid_Auto_Rows,
+// "grid-column": Grid_Column,
+// "grid-column-end": Grid_Column_End,
+// "grid-column-gap": Grid_Column_Gap,
+// "grid-column-start": Grid_Column_Start,
+// "grid-row": Grid_Row,
+// "grid-row-end": Grid_Row_End,
+// "grid-row-gap": Grid_Row_Gap,
+// "grid-row-start": Grid_Row_Start,
+// "grid-template": Grid_Template,
+// "grid-template-areas": Grid_Template_Areas,
+// "grid-template-columns": Grid_Template_Columns,
+// "grid-template-rows": Grid_Template_Rows,
+// "height": Height,
+// "honeydew": Honeydew,
+// "hsl": Hsl,
+// "hsla": Hsla,
+// "hz": Hz,
+// "ime-mode": Ime_Mode,
+// "import": Import,
+// "important": Important,
+// "in": In,
+// "include-source": Include_Source,
+// "indianred": Indianred,
+// "inherit": Inherit,
+// "initial": Initial,
+// "invert": Invert,
+// "justify-content": Justify_Content,
+// "justify-items": Justify_Items,
+// "justify-self": Justify_Self,
+// "keyframes": Keyframes,
+// "khz": Khz,
+// "large": Large,
+// "larger": Larger,
+// "lavender": Lavender,
+// "lavenderblush": Lavenderblush,
+// "lawngreen": Lawngreen,
+// "layer-background-color": Layer_Background_Color,
+// "layer-background-image": Layer_Background_Image,
+// "layout-flow": Layout_Flow,
+// "layout-grid": Layout_Grid,
+// "layout-grid-char": Layout_Grid_Char,
+// "layout-grid-char-spacing": Layout_Grid_Char_Spacing,
+// "layout-grid-line": Layout_Grid_Line,
+// "layout-grid-mode": Layout_Grid_Mode,
+// "layout-grid-type": Layout_Grid_Type,
+// "left": Left,
+// "lemonchiffon": Lemonchiffon,
+// "letter-spacing": Letter_Spacing,
+// "lightblue": Lightblue,
+// "lightcoral": Lightcoral,
+// "lightcyan": Lightcyan,
+// "lightgoldenrodyellow": Lightgoldenrodyellow,
+// "lightgray": Lightgray,
+// "lightgreen": Lightgreen,
+// "lightpink": Lightpink,
+// "lightsalmon": Lightsalmon,
+// "lightseagreen": Lightseagreen,
+// "lightskyblue": Lightskyblue,
+// "lightslateblue": Lightslateblue,
+// "lightsteelblue": Lightsteelblue,
+// "lightyellow": Lightyellow,
+// "limegreen": Limegreen,
+// "line-break": Line_Break,
+// "line-height": Line_Height,
+// "linear-gradient": Linear_Gradient,
+// "list-style": List_Style,
+// "list-style-image": List_Style_Image,
+// "list-style-position": List_Style_Position,
+// "list-style-type": List_Style_Type,
+// "local": Local,
+// "magenta": Magenta,
+// "margin": Margin,
+// "margin-bottom": Margin_Bottom,
+// "margin-left": Margin_Left,
+// "margin-right": Margin_Right,
+// "margin-top": Margin_Top,
+// "marker-offset": Marker_Offset,
+// "marks": Marks,
+// "mask": Mask,
+// "max": Max,
+// "max-height": Max_Height,
+// "max-width": Max_Width,
+// "media": Media,
+// "medium": Medium,
+// "mediumaquamarine": Mediumaquamarine,
+// "mediumblue": Mediumblue,
+// "mediumorchid": Mediumorchid,
+// "mediumpurple": Mediumpurple,
+// "mediumseagreen": Mediumseagreen,
+// "mediumslateblue": Mediumslateblue,
+// "mediumspringgreen": Mediumspringgreen,
+// "mediumturquoise": Mediumturquoise,
+// "mediumvioletred": Mediumvioletred,
+// "midnightblue": Midnightblue,
+// "min": Min,
+// "min-height": Min_Height,
+// "min-width": Min_Width,
+// "mintcream": Mintcream,
+// "mistyrose": Mistyrose,
+// "mm": Mm,
+// "moccasin": Moccasin,
+// "monospace": Monospace,
+// "ms": Ms,
+// "namespace": Namespace,
+// "navajowhite": Navajowhite,
+// "no-repeat": No_Repeat,
+// "none": None,
+// "normal": Normal,
+// "offset": Offset,
+// "offset-anchor": Offset_Anchor,
+// "offset-distance": Offset_Distance,
+// "offset-path": Offset_Path,
+// "offset-position": Offset_Position,
+// "offset-rotate": Offset_Rotate,
+// "olivedrab": Olivedrab,
+// "orangered": Orangered,
+// "order": Order,
+// "orphans": Orphans,
+// "outline": Outline,
+// "outline-color": Outline_Color,
+// "outline-style": Outline_Style,
+// "outline-width": Outline_Width,
+// "overflow": Overflow,
+// "overflow-x": Overflow_X,
+// "overflow-y": Overflow_Y,
+// "padding": Padding,
+// "padding-bottom": Padding_Bottom,
+// "padding-box": Padding_Box,
+// "padding-left": Padding_Left,
+// "padding-right": Padding_Right,
+// "padding-top": Padding_Top,
+// "page": Page,
+// "page-break-after": Page_Break_After,
+// "page-break-before": Page_Break_Before,
+// "page-break-inside": Page_Break_Inside,
+// "palegoldenrod": Palegoldenrod,
+// "palegreen": Palegreen,
+// "paleturquoise": Paleturquoise,
+// "palevioletred": Palevioletred,
+// "papayawhip": Papayawhip,
+// "pause": Pause,
+// "pause-after": Pause_After,
+// "pause-before": Pause_Before,
+// "pc": Pc,
+// "peachpuff": Peachpuff,
+// "pitch": Pitch,
+// "pitch-range": Pitch_Range,
+// "place-content": Place_Content,
+// "place-items": Place_Items,
+// "place-self": Place_Self,
+// "play-during": Play_During,
+// "position": Position,
+// "powderblue": Powderblue,
+// "progid": Progid,
+// "pt": Pt,
+// "px": Px,
+// "q": Q,
+// "quotes": Quotes,
+// "rad": Rad,
+// "radial-gradient": Radial_Gradient,
+// "repeat": Repeat,
+// "repeat-x": Repeat_X,
+// "repeat-y": Repeat_Y,
+// "rgb": Rgb,
+// "rgba": Rgba,
+// "richness": Richness,
+// "right": Right,
+// "rosybrown": Rosybrown,
+// "round": Round,
+// "row-gap": Row_Gap,
+// "royalblue": Royalblue,
+// "ruby-align": Ruby_Align,
+// "ruby-overhang": Ruby_Overhang,
+// "ruby-position": Ruby_Position,
+// "s": S,
+// "saddlebrown": Saddlebrown,
+// "sandybrown": Sandybrown,
+// "sans-serif": Sans_Serif,
+// "scroll": Scroll,
+// "scrollbar-3d-light-color": Scrollbar_3d_Light_Color,
+// "scrollbar-arrow-color": Scrollbar_Arrow_Color,
+// "scrollbar-base-color": Scrollbar_Base_Color,
+// "scrollbar-dark-shadow-color": Scrollbar_Dark_Shadow_Color,
+// "scrollbar-face-color": Scrollbar_Face_Color,
+// "scrollbar-highlight-color": Scrollbar_Highlight_Color,
+// "scrollbar-shadow-color": Scrollbar_Shadow_Color,
+// "scrollbar-track-color": Scrollbar_Track_Color,
+// "seagreen": Seagreen,
+// "seashell": Seashell,
+// "serif": Serif,
+// "size": Size,
+// "slateblue": Slateblue,
+// "slategray": Slategray,
+// "small": Small,
+// "smaller": Smaller,
+// "solid": Solid,
+// "space": Space,
+// "speak": Speak,
+// "speak-header": Speak_Header,
+// "speak-numeral": Speak_Numeral,
+// "speak-punctuation": Speak_Punctuation,
+// "speech-rate": Speech_Rate,
+// "springgreen": Springgreen,
+// "steelblue": Steelblue,
+// "stress": Stress,
+// "stroke": Stroke,
+// "supports": Supports,
+// "table-layout": Table_Layout,
+// "text-align": Text_Align,
+// "text-align-last": Text_Align_Last,
+// "text-autospace": Text_Autospace,
+// "text-decoration": Text_Decoration,
+// "text-decoration-color": Text_Decoration_Color,
+// "text-decoration-line": Text_Decoration_Line,
+// "text-decoration-style": Text_Decoration_Style,
+// "text-decoration-thickness": Text_Decoration_Thickness,
+// "text-emphasis": Text_Emphasis,
+// "text-emphasis-color": Text_Emphasis_Color,
+// "text-indent": Text_Indent,
+// "text-justify": Text_Justify,
+// "text-kashida-space": Text_Kashida_Space,
+// "text-overflow": Text_Overflow,
+// "text-shadow": Text_Shadow,
+// "text-transform": Text_Transform,
+// "text-underline-position": Text_Underline_Position,
+// "top": Top,
+// "transition": Transition,
+// "transition-delay": Transition_Delay,
+// "transition-duration": Transition_Duration,
+// "transition-property": Transition_Property,
+// "transition-timing-function": Transition_Timing_Function,
+// "transparent": Transparent,
+// "turn": Turn,
+// "turquoise": Turquoise,
+// "unicode-bidi": Unicode_Bidi,
+// "unicode-range": UnicodeRange,
+// "unset": Unset,
+// "url": Url,
+// "var": Var,
+// "vertical-align": Vertical_Align,
+// "visibility": Visibility,
+// "voice-family": Voice_Family,
+// "volume": Volume,
+// "white": White,
+// "white-space": White_Space,
+// "whitesmoke": Whitesmoke,
+// "widows": Widows,
+// "width": Width,
+// "word-break": Word_Break,
+// "word-spacing": Word_Spacing,
+// "word-wrap": Word_Wrap,
+// "writing-mode": Writing_Mode,
+// "x-large": X_Large,
+// "x-small": X_Small,
+// "xx-large": Xx_Large,
+// "xx-small": Xx_Small,
+// "yellow": Yellow,
+// "yellowgreen": Yellowgreen,
+// "z-index": Z_Index,
+//}
+
+// String returns the keyword that this hash encodes, as a string.
+func (i Hash) String() string {
+ text := i.Bytes()
+ return string(text)
+}
+
+// Bytes returns the keyword that this hash encodes. A Hash packs a byte
+// offset into _Hash_text in its upper bits and a length in its low byte;
+// a pair that falls outside _Hash_text yields an empty slice.
+func (i Hash) Bytes() []byte {
+ start := uint32(i >> 8)
+ end := start + uint32(i&0xff)
+ if uint32(len(_Hash_text)) < end {
+  return []byte{}
+ }
+ return _Hash_text[start:end]
+}
+
+// ToHash returns a hash Hash for a given []byte. Hash is a uint32 that is associated with the text in []byte. It returns zero if no match found.
+//
+// The lookup hashes s with an FNV-style loop (16777619 is the 32-bit FNV
+// prime) and probes at most two slots of _Hash_table: one indexed by the low
+// bits of h, one by bits 16 and up. Each table entry packs (offset<<8|length)
+// into _Hash_text; a candidate matches only if both its length and its bytes
+// equal s. This file appears machine generated — do not edit by hand.
+func ToHash(s []byte) Hash {
+ if len(s) == 0 || len(s) > _Hash_maxLen {
+  return 0
+ }
+ // Leftover from the generator: an alternative map-based lookup, disabled.
+ //if 3 < len(s) {
+ // return HashMap[string(s)]
+ //}
+ h := uint32(_Hash_hash0)
+ for i := 0; i < len(s); i++ {
+  h ^= uint32(s[i])
+  h *= 16777619
+ }
+ // First probe: low bits of h. len(_Hash_table) is a power of two, so the
+ // mask is equivalent to a modulo.
+ if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+  t := _Hash_text[i>>8 : i>>8+i&0xff]
+  for i := 0; i < len(s); i++ {
+   if t[i] != s[i] {
+    goto NEXT
+   }
+  }
+  return i
+ }
+NEXT:
+ // Second probe: bits 16+ of h. A mismatch here means no entry exists.
+ if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+  t := _Hash_text[i>>8 : i>>8+i&0xff]
+  for i := 0; i < len(s); i++ {
+   if t[i] != s[i] {
+    return 0
+   }
+  }
+  return i
+ }
+ return 0
+}
+
+// _Hash_hash0 is the seed for the FNV-style hash computed in ToHash.
+const _Hash_hash0 = 0x9acb0442
+
+// _Hash_maxLen is the byte length of the longest keyword in _Hash_text;
+// ToHash rejects longer inputs without hashing.
+const _Hash_maxLen = 27
+
+// _Hash_text is one concatenated blob containing every keyword as a
+// substring; entries overlap to save space (e.g. "-ms-filter" shares bytes
+// with "filter"). Hash values address it as (offset<<8 | length). This blob
+// appears machine generated — do not edit by hand.
+var _Hash_text = []byte("" +
+ "-ms-filteradial-gradientext-emphasis-colorgbackground-attach" +
+ "mentext-indentext-justify-contentext-kashida-spacelevationav" +
+ "ajowhitext-decoration-line-heightext-overflow-xx-largerichne" +
+ "ssaddlebrowno-repeat-yanimation-namespacenteruby-overhangain" +
+ "sborosybrownanimation-play-statext-align-lastresscrollbar-ar" +
+ "row-coloruby-positionanimation-timing-functionazimuthoneydew" +
+ "ord-breakbackground-originclude-sourcebackground-position-xb" +
+ "ackground-position-ybackground-repeat-xbackground-sizebehavi" +
+ "orblackblanchedalmondarkblueboldarkcyanimation-delayer-backg" +
+ "round-colorborder-bottom-colorborder-bottom-stylemonchiffont" +
+ "-faceborder-bottom-widthslavenderblushborder-box-shadoword-s" +
+ "pacinghostwhitext-decoration-colorborder-collapseashellawngr" +
+ "eenborder-colorborder-left-colorborder-left-styleborder-left" +
+ "-widthborder-right-colorborder-right-styleborder-right-width" +
+ "border-spacingrid-areanimation-durationormalphacceleratorpha" +
+ "nsandybrownonempty-cellsans-serifantasyborder-styleborder-to" +
+ "p-colorborder-top-styleborder-top-widthborder-widthburlywood" +
+ "arkgoldenrodarkgraycaption-sideepskybluecaret-colorchartreus" +
+ "echocolatext-autospaceclampadding-boxclearcolumn-counter-res" +
+ "etransition-propertycolumn-rule-colorcolumn-rule-stylecolumn" +
+ "-rule-widthcolumn-widthcornflowerbluecornsilkcue-aftercue-be" +
+ "forestgreenvisibilitycurrentcolorcursivecursordarkvioletdocu" +
+ "mentdodgerbluedpcmargin-topadding-rightdpitch-rangedppxflex-" +
+ "growflex-shrinkflex-wrapadding-topage-break-afterfloattransi" +
+ "tion-delayer-background-imagefloralwhitesmokeyframescrollbar" +
+ "-dark-shadow-colorfont-familyfont-size-adjustify-itemscrollb" +
+ "ar-face-colorfont-stretcharsetfont-stylefont-variantiquewhit" +
+ "e-spacefont-weightfuchsianimation-fill-modeeppinkhz-indexx-s" +
+ "malleroyalbluegrid-column-gapage-break-beforegrid-column-sta" +
+ "rtgrid-row-endarkolivegreengrid-row-gapage-break-insidegrid-" +
+ "row-startgrid-template-areascrollbar-track-colorgrid-templat" +
+ "e-columnsolidarkorangeredarkgreenyellowgreengrid-template-ro" +
+ "wspeak-headerimportantinheritinitialicebluevioletter-spacing" +
+ "rid-auto-columnscrollbar-highlight-colorinvertical-align-ite" +
+ "mspeak-numeralayout-grid-char-spacingrid-auto-flowjustify-se" +
+ "lfirebricklayout-grid-line-breaklayout-grid-modegrid-auto-ro" +
+ "wscrollbar-shadow-colorlayout-grid-typeachpufflex-basiscroll" +
+ "bar-base-colorlightbluelightcoralign-selflex-directionlightc" +
+ "yanimation-directionlightgoldenrodyellowlightgraylightgreenl" +
+ "ightpinklightsalmonlightseagreenlightskybluelightslateblueli" +
+ "ghtsteelbluelightyellowlimegreenlinear-gradientlist-style-im" +
+ "agelist-style-positionlist-style-typelocalcadetbluemaskmax-h" +
+ "eightmax-widthmediumaquamarinemediumbluemediumorchidarkorchi" +
+ "darkkhakime-modefaultransition-timing-functionmediumpurpleme" +
+ "diumseagreenmediumslatebluemediumspringgreenmediumturquoisem" +
+ "ediumvioletredarksalmonospacemidnightbluemin-heightmin-width" +
+ "mintcreamarker-offset-anchormistyrosemmarkspeak-punctuationm" +
+ "occasindianredarkseagreenoffset-distanceoffset-pathoffset-po" +
+ "sitionoffset-rotatext-decoration-styleolivedrabackground-cli" +
+ "padding-bottomargin-rightransition-durationoutline-coloroutl" +
+ "ine-styleoutline-widthoverflow-ypalegreenpaleturquoisepalevi" +
+ "oletredarkslategraypapayawhipalegoldenrodarkslatebluepause-a" +
+ "fterpause-beforeplace-contentplace-itemspeech-ratext-decorat" +
+ "ion-thicknesstrokeplace-selflex-flowriting-modepowderbluepro" +
+ "gidarkturquoisequotesupportsunicode-bidisplay-duringrid-colu" +
+ "mn-endarkmagentable-layout-floword-wrapadding-leftransparent" +
+ "urnunicode-rangeunsetvoice-familyvolumedianimation-iteration" +
+ "-counter-incrementext-shadowidowscrollbar-3d-light-coloruby-" +
+ "align-contentext-transformargin-bottomargin-leftext-underlin" +
+ "e-position")
+
+// _Hash_table is the two-probe hash table consulted by ToHash. Its size is a
+// power of two so the FNV hash can be reduced with a mask; each entry packs
+// (offset<<8 | length) into _Hash_text, with the keyword shown in the trailing
+// comment. This table appears machine generated — do not edit by hand.
+var _Hash_table = [1 << 10]Hash{
+ 0x3: 0xc290b, // pause-after
+ 0x6: 0xd5d11, // counter-increment
+ 0x8: 0xcce07, // display
+ 0x9: 0x51a0a, // darkviolet
+ 0xb: 0xbf09, // no-repeat
+ 0xd: 0x4402, // in
+ 0x14: 0x6f211, // page-break-inside
+ 0x15: 0x6250c, // font-stretch
+ 0x19: 0x5f910, // font-size-adjust
+ 0x1a: 0x47513, // transition-property
+ 0x1c: 0x78105, // speak
+ 0x1f: 0x82a0c, // justify-self
+ 0x20: 0x61114, // scrollbar-face-color
+ 0x24: 0x2b60f, // border-collapse
+ 0x25: 0x68607, // z-index
+ 0x27: 0xd980d, // align-content
+ 0x2a: 0x99f13, // list-style-position
+ 0x2b: 0xcdb0f, // grid-column-end
+ 0x2c: 0x14119, // animation-timing-function
+ 0x30: 0xb0909, // indianred
+ 0x34: 0x97709, // limegreen
+ 0x35: 0xbc10d, // outline-width
+ 0x3f: 0x15a07, // azimuth
+ 0x40: 0x1e70e, // blanchedalmond
+ 0x41: 0x84a0a, // line-break
+ 0x42: 0x7a209, // aliceblue
+ 0x43: 0xf309, // rosybrown
+ 0x46: 0xa7c0f, // mediumturquoise
+ 0x49: 0xd7706, // widows
+ 0x4b: 0xb370f, // offset-position
+ 0x4d: 0xd150b, // transparent
+ 0x4e: 0x79d07, // initial
+ 0x52: 0x1cb0f, // background-size
+ 0x55: 0x2505, // color
+ 0x56: 0x59a10, // transition-delay
+ 0x5a: 0x750b, // navajowhite
+ 0x5b: 0x7110d, // grid-template
+ 0x5c: 0x3b710, // border-top-color
+ 0x62: 0xbce0a, // overflow-y
+ 0x64: 0x9370d, // lightseagreen
+ 0x6c: 0x10e0f, // text-align-last
+ 0x6f: 0x8050b, // layout-grid
+ 0x70: 0xca09, // animation
+ 0x71: 0x1da08, // behavior
+ 0x72: 0x5390a, // margin-top
+ 0x74: 0x3ab0c, // border-style
+ 0x78: 0x5d31b, // scrollbar-dark-shadow-color
+ 0x79: 0x69103, // all
+ 0x7a: 0x3f0b, // text-indent
+ 0x7b: 0xbe10d, // paleturquoise
+ 0x7e: 0x58510, // page-break-after
+ 0x80: 0x5420d, // padding-right
+ 0x84: 0x7e60e, // vertical-align
+ 0x85: 0x50d07, // cursive
+ 0x8a: 0x7030e, // grid-row-start
+ 0x8c: 0xae08, // richness
+ 0x8e: 0x3b70a, // border-top
+ 0x94: 0x35509, // grid-area
+ 0x95: 0x85410, // layout-grid-mode
+ 0x96: 0xaee05, // marks
+ 0x97: 0x64d01, // q
+ 0x98: 0x78d09, // important
+ 0x9c: 0x406, // filter
+ 0x9d: 0xa8b0f, // mediumvioletred
+ 0xa5: 0xc570b, // speech-rate
+ 0xa8: 0x53702, // pc
+ 0xab: 0x90f, // radial-gradient
+ 0xae: 0x11b06, // stress
+ 0xb4: 0x6050d, // justify-items
+ 0xb7: 0x9500e, // lightslateblue
+ 0xba: 0x35504, // grid
+ 0xbb: 0xb0308, // moccasin
+ 0xbe: 0xd0209, // word-wrap
+ 0xc0: 0x6d90e, // darkolivegreen
+ 0xc5: 0xc6019, // text-decoration-thickness
+ 0xc7: 0xdb06, // center
+ 0xc8: 0x2a115, // text-decoration-color
+ 0xcb: 0xabf09, // min-width
+ 0xce: 0x5ee0b, // font-family
+ 0xd1: 0xa1c08, // ime-mode
+ 0xd3: 0x3d710, // border-top-width
+ 0xd4: 0x53906, // margin
+ 0xd9: 0x4880b, // column-rule
+ 0xda: 0x98f0a, // list-style
+ 0xdf: 0x6ce0c, // grid-row-end
+ 0xe4: 0x2050f, // animation-delay
+ 0xe8: 0x4aa11, // column-rule-width
+ 0xec: 0x57309, // flex-wrap
+ 0xed: 0xced07, // magenta
+ 0xee: 0x88710, // layout-grid-type
+ 0xef: 0x4520b, // padding-box
+ 0xf0: 0x7e14, // text-decoration-line
+ 0xf2: 0x4dd09, // cue-after
+ 0xf4: 0x8640e, // grid-auto-rows
+ 0xf5: 0x7650b, // yellowgreen
+ 0xf8: 0x89509, // peachpuff
+ 0xf9: 0x74607, // columns
+ 0xfa: 0x22805, // order
+ 0xfb: 0x3120c, // border-right
+ 0x100: 0x1800e, // include-source
+ 0x104: 0xc2905, // pause
+ 0x105: 0x1fc04, // bold
+ 0x106: 0xcc40c, // unicode-bidi
+ 0x108: 0x67604, // fill
+ 0x109: 0x75c09, // darkgreen
+ 0x10b: 0x45d05, // clear
+ 0x10c: 0x67d08, // deeppink
+ 0x110: 0x8e913, // animation-direction
+ 0x112: 0x1b811, // background-repeat
+ 0x117: 0xca506, // progid
+ 0x11d: 0x8a614, // scrollbar-base-color
+ 0x11e: 0xa, // -ms-filter
+ 0x11f: 0x2ca09, // lawngreen
+ 0x120: 0x51406, // cursor
+ 0x121: 0x44e05, // clamp
+ 0x123: 0x48811, // column-rule-color
+ 0x128: 0x40f0c, // caption-side
+ 0x12a: 0xc9b0a, // powderblue
+ 0x12b: 0xdc717, // text-underline-position
+ 0x12d: 0x72315, // scrollbar-track-color
+ 0x131: 0x81c0e, // grid-auto-flow
+ 0x132: 0x7810c, // speak-header
+ 0x133: 0x25409, // font-face
+ 0x136: 0xa710b, // springgreen
+ 0x13a: 0xc7e0a, // place-self
+ 0x13d: 0xc206, // repeat
+ 0x13e: 0x9800f, // linear-gradient
+ 0x142: 0x5010c, // currentcolor
+ 0x145: 0xad706, // offset
+ 0x14a: 0x69e0f, // grid-column-gap
+ 0x14c: 0x6905, // space
+ 0x14e: 0x39b0a, // sans-serif
+ 0x14f: 0x6360a, // font-style
+ 0x153: 0x66607, // fuchsia
+ 0x154: 0xb7904, // clip
+ 0x155: 0xae409, // mistyrose
+ 0x158: 0x9d08, // overflow
+ 0x15d: 0xc7806, // stroke
+ 0x162: 0x80510, // layout-grid-char
+ 0x163: 0xa420c, // mediumpurple
+ 0x165: 0x4f503, // env
+ 0x168: 0x4690d, // counter-reset
+ 0x16b: 0x5cb09, // keyframes
+ 0x16f: 0x7b05, // white
+ 0x172: 0x1004, // grad
+ 0x174: 0xdb10d, // margin-bottom
+ 0x175: 0x31212, // border-right-color
+ 0x177: 0x25404, // font
+ 0x178: 0xc100d, // palegoldenrod
+ 0x179: 0x73815, // grid-template-columns
+ 0x17a: 0x7e0f, // text-decoration
+ 0x17e: 0x89d0a, // flex-basis
+ 0x186: 0x7ef0b, // align-items
+ 0x189: 0x4bb0c, // column-width
+ 0x18a: 0x3c710, // border-top-style
+ 0x18b: 0x1d604, // size
+ 0x18c: 0xd4505, // media
+ 0x191: 0xb7c0e, // padding-bottom
+ 0x194: 0x2df11, // border-left-color
+ 0x195: 0x7a70a, // blueviolet
+ 0x198: 0x92c0b, // lightsalmon
+ 0x19d: 0x27108, // lavender
+ 0x19e: 0x5a716, // layer-background-image
+ 0x1a0: 0x6500b, // white-space
+ 0x1a3: 0xe00d, // ruby-overhang
+ 0x1a4: 0x24b0c, // lemonchiffon
+ 0x1a5: 0x3be03, // top
+ 0x1a9: 0x2c308, // seashell
+ 0x1aa: 0x7ae0e, // letter-spacing
+ 0x1ac: 0x2b0a, // background
+ 0x1af: 0x64503, // var
+ 0x1b0: 0xaed02, // mm
+ 0x1b6: 0x12015, // scrollbar-arrow-color
+ 0x1b8: 0xda40e, // text-transform
+ 0x1b9: 0x65b0b, // font-weight
+ 0x1ba: 0x53802, // cm
+ 0x1bb: 0x12006, // scroll
+ 0x1c0: 0x21710, // background-color
+ 0x1c1: 0x2710d, // lavenderblush
+ 0x1c6: 0xb5115, // text-decoration-style
+ 0x1c9: 0x79607, // inherit
+ 0x1cf: 0x2e604, // left
+ 0x1d0: 0x6490c, // antiquewhite
+ 0x1d4: 0xb6609, // olivedrab
+ 0x1da: 0x2990a, // ghostwhite
+ 0x1dd: 0x91009, // lightgray
+ 0x1e2: 0x26f04, // hsla
+ 0x1e3: 0x26f03, // hsl
+ 0x1e4: 0xbd809, // palegreen
+ 0x1e5: 0x4190b, // deepskyblue
+ 0x1e8: 0xac809, // mintcream
+ 0x1ea: 0x7e406, // invert
+ 0x1eb: 0x6400c, // font-variant
+ 0x1ec: 0x8fc14, // lightgoldenrodyellow
+ 0x1ee: 0x62f07, // charset
+ 0x1ef: 0xc8f0c, // writing-mode
+ 0x1f0: 0x5c30a, // whitesmoke
+ 0x1f5: 0x9d0a, // overflow-x
+ 0x1f6: 0xaa90c, // midnightblue
+ 0x1f7: 0xcb706, // quotes
+ 0x1f8: 0x22706, // border
+ 0x1fa: 0x42f0a, // chartreuse
+ 0x1fc: 0xba707, // outline
+ 0x1fd: 0xa281a, // transition-timing-function
+ 0x1fe: 0xcbc08, // supports
+ 0x204: 0x1670a, // word-break
+ 0x205: 0xaa009, // monospace
+ 0x206: 0x2850a, // box-shadow
+ 0x209: 0x5680b, // flex-shrink
+ 0x20f: 0xd0a0c, // padding-left
+ 0x214: 0xc4d0b, // place-items
+ 0x216: 0xc070a, // papayawhip
+ 0x217: 0x17111, // background-origin
+ 0x218: 0x52408, // document
+ 0x219: 0x52c0a, // dodgerblue
+ 0x21c: 0x9440c, // lightskyblue
+ 0x21e: 0x6bd11, // grid-column-start
+ 0x221: 0x30111, // border-left-width
+ 0x224: 0x68c08, // xx-small
+ 0x226: 0x1f408, // darkblue
+ 0x229: 0x25d13, // border-bottom-width
+ 0x22a: 0x98f10, // list-style-image
+ 0x22d: 0x44504, // auto
+ 0x230: 0x1e205, // black
+ 0x231: 0xaf211, // speak-punctuation
+ 0x232: 0x13908, // position
+ 0x234: 0xc340c, // pause-before
+ 0x236: 0x95e0e, // lightsteelblue
+ 0x23a: 0xcd10b, // play-during
+ 0x23f: 0x83509, // firebrick
+ 0x249: 0x6ce08, // grid-row
+ 0x24a: 0x55d02, // px
+ 0x24c: 0x1a315, // background-position-y
+ 0x251: 0xd1f04, // turn
+ 0x256: 0xba70d, // outline-color
+ 0x257: 0x9c304, // calc
+ 0x258: 0xd4919, // animation-iteration-count
+ 0x259: 0xad70d, // offset-anchor
+ 0x25b: 0xa4e0e, // mediumseagreen
+ 0x25e: 0x4620c, // column-count
+ 0x263: 0x10e0a, // text-align
+ 0x266: 0x66c13, // animation-fill-mode
+ 0x267: 0x32412, // border-right-style
+ 0x268: 0xa707, // x-large
+ 0x269: 0x8d40e, // flex-direction
+ 0x26a: 0x4f70a, // visibility
+ 0x26f: 0xb2c0b, // offset-path
+ 0x270: 0x27e0a, // border-box
+ 0x276: 0x70103, // deg
+ 0x278: 0x1713, // text-emphasis-color
+ 0x27f: 0xc1c0d, // darkslateblue
+ 0x283: 0x55f09, // flex-grow
+ 0x285: 0x8e209, // lightcyan
+ 0x28a: 0x102, // ms
+ 0x28d: 0xa906, // larger
+ 0x28e: 0xa990a, // darksalmon
+ 0x292: 0x2f011, // border-left-style
+ 0x293: 0xa8209, // turquoise
+ 0x294: 0x3a407, // fantasy
+ 0x296: 0xec09, // gainsboro
+ 0x297: 0x201, // s
+ 0x298: 0x23a13, // border-bottom-style
+ 0x299: 0xce90b, // darkmagenta
+ 0x29b: 0xb50b, // saddlebrown
+ 0x2a0: 0x59505, // float
+ 0x2a3: 0x6ec07, // row-gap
+ 0x2a5: 0xd4106, // volume
+ 0x2a6: 0xab50a, // min-height
+ 0x2a7: 0x77012, // grid-template-rows
+ 0x2a9: 0x3760b, // accelerator
+ 0x2b0: 0x68f05, // small
+ 0x2b1: 0x59804, // attr
+ 0x2b2: 0x28e0c, // word-spacing
+ 0x2b3: 0x35d12, // animation-duration
+ 0x2b5: 0x4dd03, // cue
+ 0x2b6: 0x95509, // slateblue
+ 0x2b8: 0x38e04, // none
+ 0x2b9: 0x6a30a, // column-gap
+ 0x2ba: 0x4e0f, // justify-content
+ 0x2bb: 0x5607, // content
+ 0x2bd: 0x54f03, // dpi
+ 0x2be: 0x87116, // scrollbar-shadow-color
+ 0x2bf: 0x78d06, // import
+ 0x2c0: 0xc8709, // flex-flow
+ 0x2c1: 0x69509, // royalblue
+ 0x2c3: 0x9c609, // cadetblue
+ 0x2c4: 0x490c, // text-justify
+ 0x2cb: 0x8c30a, // lightcoral
+ 0x2cf: 0xb890c, // margin-right
+ 0x2d2: 0x76506, // yellow
+ 0x2d3: 0x26b05, // width
+ 0x2d6: 0x14d03, // min
+ 0x2da: 0x1340d, // ruby-position
+ 0x2dc: 0x40708, // darkgray
+ 0x2e2: 0x69e0b, // grid-column
+ 0x2e4: 0xa1409, // darkkhaki
+ 0x2e5: 0xc400d, // place-content
+ 0x2e7: 0xbee0d, // palevioletred
+ 0x2ea: 0x5bd0b, // floralwhite
+ 0x2eb: 0xc208, // repeat-y
+ 0x2ee: 0x980d, // text-overflow
+ 0x2f1: 0xca0e, // animation-name
+ 0x2fb: 0x7cb19, // scrollbar-highlight-color
+ 0x2fe: 0x5500b, // pitch-range
+ 0x302: 0x3005, // round
+ 0x305: 0x4c70e, // cornflowerblue
+ 0x307: 0x7f90d, // speak-numeral
+ 0x308: 0x9e606, // medium
+ 0x30a: 0x170d, // text-emphasis
+ 0x30d: 0x9dd09, // max-width
+ 0x311: 0x36e06, // normal
+ 0x312: 0x68403, // khz
+ 0x315: 0x2903, // rgb
+ 0x316: 0x8ba09, // lightblue
+ 0x317: 0x8d909, // direction
+ 0x31a: 0xd350c, // voice-family
+ 0x31c: 0x3480e, // border-spacing
+ 0x321: 0x6d09, // elevation
+ 0x323: 0x1c308, // repeat-x
+ 0x324: 0x83e10, // layout-grid-line
+ 0x326: 0xa000c, // mediumorchid
+ 0x32b: 0xa6b11, // mediumspringgreen
+ 0x32d: 0xa905, // large
+ 0x32e: 0xd930a, // ruby-align
+ 0x330: 0xbfa0d, // darkslategray
+ 0x332: 0x5c12, // text-kashida-space
+ 0x334: 0xbb40d, // outline-style
+ 0x336: 0x3a005, // serif
+ 0x337: 0x4240b, // caret-color
+ 0x33a: 0x37205, // alpha
+ 0x33c: 0x71113, // grid-template-areas
+ 0x33d: 0x49911, // column-rule-style
+ 0x33f: 0xcf80b, // layout-flow
+ 0x340: 0x31905, // right
+ 0x341: 0x3e70c, // border-width
+ 0x343: 0xb6e0f, // background-clip
+ 0x344: 0xd230d, // unicode-range
+ 0x345: 0x74c05, // solid
+ 0x346: 0x2df0b, // border-left
+ 0x348: 0x9ec0a, // aquamarine
+ 0x349: 0x3850a, // sandybrown
+ 0x34a: 0x16008, // honeydew
+ 0x34b: 0x75409, // orangered
+ 0x34c: 0xb110c, // darkseagreen
+ 0x34d: 0x37f07, // orphans
+ 0x34e: 0x6e70c, // grid-row-gap
+ 0x351: 0x22e06, // bottom
+ 0x359: 0x9c105, // local
+ 0x35c: 0x8cb0a, // align-self
+ 0x35e: 0x33612, // border-right-width
+ 0x360: 0x2b15, // background-attachment
+ 0x364: 0x9190a, // lightgreen
+ 0x366: 0x39302, // pt
+ 0x368: 0x4400e, // text-autospace
+ 0x36b: 0x3f403, // url
+ 0x36c: 0x68502, // hz
+ 0x371: 0x9306, // height
+ 0x372: 0x5ad10, // background-image
+ 0x377: 0x903, // rad
+ 0x37c: 0x21116, // layer-background-color
+ 0x37d: 0x1ff08, // darkcyan
+ 0x382: 0x18e13, // background-position
+ 0x384: 0x9d303, // max
+ 0x38c: 0xa608, // xx-large
+ 0x38d: 0x3f309, // burlywood
+ 0x38f: 0xd7c18, // scrollbar-3d-light-color
+ 0x390: 0x3ff09, // goldenrod
+ 0x392: 0x92309, // lightpink
+ 0x393: 0x8e0b, // line-height
+ 0x396: 0x22713, // border-bottom-color
+ 0x398: 0x80518, // layout-grid-char-spacing
+ 0x39c: 0x2904, // rgba
+ 0x3a1: 0x9f60a, // mediumblue
+ 0x3a3: 0x9d30a, // max-height
+ 0x3a4: 0x7bb11, // grid-auto-columns
+ 0x3a5: 0xa0b0a, // darkorchid
+ 0x3a9: 0x7600b, // greenyellow
+ 0x3ae: 0x96c0b, // lightyellow
+ 0x3b1: 0x4750a, // transition
+ 0x3b3: 0x4e60a, // cue-before
+ 0x3b6: 0x15208, // function
+ 0x3b9: 0x96309, // steelblue
+ 0x3be: 0xa5c0f, // mediumslateblue
+ 0x3bf: 0xcaa0d, // darkturquoise
+ 0x3c0: 0x43909, // chocolate
+ 0x3c3: 0x5f909, // font-size
+ 0x3c5: 0x55f04, // flex
+ 0x3c7: 0xd3005, // unset
+ 0x3c8: 0xd6d0b, // text-shadow
+ 0x3ca: 0x4ec0b, // forestgreen
+ 0x3cc: 0xbfe09, // slategray
+ 0x3cd: 0x6ac11, // page-break-before
+ 0x3ce: 0x55b04, // dppx
+ 0x3d0: 0x2270d, // border-bottom
+ 0x3d3: 0xb1d0f, // offset-distance
+ 0x3d4: 0x3fb0d, // darkgoldenrod
+ 0x3d6: 0x53604, // dpcm
+ 0x3d8: 0x7500a, // darkorange
+ 0x3dc: 0xb9413, // transition-duration
+ 0x3de: 0x2d30c, // border-color
+ 0x3df: 0x18e15, // background-position-x
+ 0x3e0: 0x55005, // pitch
+ 0x3e2: 0xdbd0b, // margin-left
+ 0x3e3: 0x58504, // page
+ 0x3e5: 0x57b0b, // padding-top
+ 0x3e7: 0xb460d, // offset-rotate
+ 0x3e8: 0x93c08, // seagreen
+ 0x3e9: 0x4d508, // cornsilk
+ 0x3ea: 0x68f07, // smaller
+ 0x3ec: 0xcf20c, // table-layout
+ 0x3ed: 0xfc14, // animation-play-state
+ 0x3ef: 0xa2207, // default
+ 0x3f0: 0x68d07, // x-small
+ 0x3f3: 0x9e610, // mediumaquamarine
+ 0x3f4: 0xad00d, // marker-offset
+ 0x3f9: 0xd409, // namespace
+ 0x3fa: 0x9cf04, // mask
+ 0x3fb: 0x45207, // padding
+ 0x3fd: 0x9b20f, // list-style-type
+ 0x3ff: 0x3910b, // empty-cells
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/css/table.go b/vendor/github.com/tdewolff/minify/v2/css/table.go
new file mode 100644
index 0000000..b7ecb84
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/css/table.go
@@ -0,0 +1,198 @@
+package css
+
+// optionalZeroDimension is the set of unit identifiers (lengths and angles)
+// for which a zero dimension's unit suffix is optional — presumably consumed
+// by the minifier to write plain "0" instead of e.g. "0px"; confirm usage in
+// css.go. Keys are lowercase unit names without a leading digit.
+var optionalZeroDimension = map[string]bool{
+ "px": true,
+ "mm": true,
+ "q": true,
+ "cm": true,
+ "in": true,
+ "pt": true,
+ "pc": true,
+ "ch": true,
+ "em": true,
+ "ex": true,
+ "rem": true,
+ "vh": true,
+ "vw": true,
+ "vmin": true,
+ "vmax": true,
+ "deg": true,
+ "grad": true,
+ "rad": true,
+ "turn": true,
+}
+
+// Uses http://www.w3.org/TR/2010/PR-css3-color-20101028/ for colors
+
+// ShortenColorHex maps a color hexcode to its shorter name
+var ShortenColorHex = map[string][]byte{
+ "#000080": []byte("navy"),
+ "#008000": []byte("green"),
+ "#008080": []byte("teal"),
+ "#4b0082": []byte("indigo"),
+ "#800000": []byte("maroon"),
+ "#800080": []byte("purple"),
+ "#808000": []byte("olive"),
+ "#808080": []byte("gray"),
+ "#a0522d": []byte("sienna"),
+ "#a52a2a": []byte("brown"),
+ "#c0c0c0": []byte("silver"),
+ "#cd853f": []byte("peru"),
+ "#d2b48c": []byte("tan"),
+ "#da70d6": []byte("orchid"),
+ "#dda0dd": []byte("plum"),
+ "#ee82ee": []byte("violet"),
+ "#f0e68c": []byte("khaki"),
+ "#f0ffff": []byte("azure"),
+ "#f5deb3": []byte("wheat"),
+ "#f5f5dc": []byte("beige"),
+ "#fa8072": []byte("salmon"),
+ "#faf0e6": []byte("linen"),
+ "#ff6347": []byte("tomato"),
+ "#ff7f50": []byte("coral"),
+ "#ffa500": []byte("orange"),
+ "#ffc0cb": []byte("pink"),
+ "#ffd700": []byte("gold"),
+ "#ffe4c4": []byte("bisque"),
+ "#fffafa": []byte("snow"),
+ "#fffff0": []byte("ivory"),
+ "#ff0000": []byte("red"),
+ "#f00": []byte("red"),
+}
+
+// ShortenColorName maps a color name to its shorter hexcode
+var ShortenColorName = map[Hash][]byte{
+ Black: []byte("#000"),
+ Darkblue: []byte("#00008b"),
+ Mediumblue: []byte("#0000cd"),
+ Darkgreen: []byte("#006400"),
+ Darkcyan: []byte("#008b8b"),
+ Deepskyblue: []byte("#00bfff"),
+ Darkturquoise: []byte("#00ced1"),
+ Mediumspringgreen: []byte("#00fa9a"),
+ Springgreen: []byte("#00ff7f"),
+ Midnightblue: []byte("#191970"),
+ Dodgerblue: []byte("#1e90ff"),
+ Lightseagreen: []byte("#20b2aa"),
+ Forestgreen: []byte("#228b22"),
+ Seagreen: []byte("#2e8b57"),
+ Darkslategray: []byte("#2f4f4f"),
+ Limegreen: []byte("#32cd32"),
+ Mediumseagreen: []byte("#3cb371"),
+ Turquoise: []byte("#40e0d0"),
+ Royalblue: []byte("#4169e1"),
+ Steelblue: []byte("#4682b4"),
+ Darkslateblue: []byte("#483d8b"),
+ Mediumturquoise: []byte("#48d1cc"),
+ Darkolivegreen: []byte("#556b2f"),
+ Cadetblue: []byte("#5f9ea0"),
+ Cornflowerblue: []byte("#6495ed"),
+ Mediumaquamarine: []byte("#66cdaa"),
+ Slateblue: []byte("#6a5acd"),
+ Olivedrab: []byte("#6b8e23"),
+ Slategray: []byte("#708090"),
+ Lightslateblue: []byte("#789"),
+ Mediumslateblue: []byte("#7b68ee"),
+ Lawngreen: []byte("#7cfc00"),
+ Chartreuse: []byte("#7fff00"),
+ Aquamarine: []byte("#7fffd4"),
+ Lightskyblue: []byte("#87cefa"),
+ Blueviolet: []byte("#8a2be2"),
+ Darkmagenta: []byte("#8b008b"),
+ Saddlebrown: []byte("#8b4513"),
+ Darkseagreen: []byte("#8fbc8f"),
+ Lightgreen: []byte("#90ee90"),
+ Mediumpurple: []byte("#9370db"),
+ Darkviolet: []byte("#9400d3"),
+ Palegreen: []byte("#98fb98"),
+ Darkorchid: []byte("#9932cc"),
+ Yellowgreen: []byte("#9acd32"),
+ Darkgray: []byte("#a9a9a9"),
+ Lightblue: []byte("#add8e6"),
+ Greenyellow: []byte("#adff2f"),
+ Paleturquoise: []byte("#afeeee"),
+ Lightsteelblue: []byte("#b0c4de"),
+ Powderblue: []byte("#b0e0e6"),
+ Firebrick: []byte("#b22222"),
+ Darkgoldenrod: []byte("#b8860b"),
+ Mediumorchid: []byte("#ba55d3"),
+ Rosybrown: []byte("#bc8f8f"),
+ Darkkhaki: []byte("#bdb76b"),
+ Mediumvioletred: []byte("#c71585"),
+ Indianred: []byte("#cd5c5c"),
+ Chocolate: []byte("#d2691e"),
+ Lightgray: []byte("#d3d3d3"),
+ Goldenrod: []byte("#daa520"),
+ Palevioletred: []byte("#db7093"),
+ Gainsboro: []byte("#dcdcdc"),
+ Burlywood: []byte("#deb887"),
+ Lightcyan: []byte("#e0ffff"),
+ Lavender: []byte("#e6e6fa"),
+ Darksalmon: []byte("#e9967a"),
+ Palegoldenrod: []byte("#eee8aa"),
+ Lightcoral: []byte("#f08080"),
+ Aliceblue: []byte("#f0f8ff"),
+ Honeydew: []byte("#f0fff0"),
+ Sandybrown: []byte("#f4a460"),
+ Whitesmoke: []byte("#f5f5f5"),
+ Mintcream: []byte("#f5fffa"),
+ Ghostwhite: []byte("#f8f8ff"),
+ Antiquewhite: []byte("#faebd7"),
+ Lightgoldenrodyellow: []byte("#fafad2"),
+ Fuchsia: []byte("#f0f"),
+ Magenta: []byte("#f0f"),
+ Deeppink: []byte("#ff1493"),
+ Orangered: []byte("#ff4500"),
+ Darkorange: []byte("#ff8c00"),
+ Lightsalmon: []byte("#ffa07a"),
+ Lightpink: []byte("#ffb6c1"),
+ Peachpuff: []byte("#ffdab9"),
+ Navajowhite: []byte("#ffdead"),
+ Moccasin: []byte("#ffe4b5"),
+ Mistyrose: []byte("#ffe4e1"),
+ Blanchedalmond: []byte("#ffebcd"),
+ Papayawhip: []byte("#ffefd5"),
+ Lavenderblush: []byte("#fff0f5"),
+ Seashell: []byte("#fff5ee"),
+ Cornsilk: []byte("#fff8dc"),
+ Lemonchiffon: []byte("#fffacd"),
+ Floralwhite: []byte("#fffaf0"),
+ Yellow: []byte("#ff0"),
+ Lightyellow: []byte("#ffffe0"),
+ White: []byte("#fff"),
+}
+
+// PropertyOverrides is a map of which properties are overridden by the given property.
+var PropertyOverrides = map[Hash][]Hash{
+ Background: {Background, Background_Image, Background_Position, Background_Size, Background_Repeat, Background_Origin, Background_Clip, Background_Attachment, Background_Color},
+ Font: {Font, Font_Style, Font_Variant, Font_Weight, Font_Stretch, Font_Size, Font_Family, Line_Height},
+ Border: {Border, Border_Width, Border_Top_Width, Border_Right_Width, Border_Bottom_Width, Border_Left_Width, Border_Style, Border_Top_Style, Border_Right_Style, Border_Bottom_Style, Border_Left_Style, Border_Color, Border_Top_Color, Border_Right_Color, Border_Bottom_Color, Border_Left_Color},
+ Border_Width: {Border_Width, Border_Top_Width, Border_Right_Width, Border_Bottom_Width, Border_Left_Width},
+ Border_Style: {Border_Style, Border_Top_Style, Border_Right_Style, Border_Bottom_Style, Border_Left_Style},
+ Border_Color: {Border_Color, Border_Top_Color, Border_Right_Color, Border_Bottom_Color, Border_Left_Color},
+ Border_Top: {Border_Top, Border_Top_Width, Border_Top_Style, Border_Top_Color},
+ Border_Right: {Border_Right, Border_Right_Width, Border_Right_Style, Border_Right_Color},
+ Border_Bottom: {Border_Bottom, Border_Bottom_Width, Border_Bottom_Style, Border_Bottom_Color},
+ Border_Left: {Border_Left, Border_Left_Width, Border_Left_Style, Border_Left_Color},
+ Margin: {Margin, Margin_Top, Margin_Right, Margin_Bottom, Margin_Left},
+ Padding: {Padding, Padding_Top, Padding_Right, Padding_Bottom, Padding_Left},
+ Column_Rule: {Column_Rule, Column_Rule_Width, Column_Rule_Style, Column_Rule_Color},
+ Animation: {Animation, Animation_Name, Animation_Duration, Animation_Timing_Function, Animation_Delay, Animation_Iteration_Count, Animation_Direction, Animation_Fill_Mode, Animation_Play_State},
+ Columns: {Columns, Column_Width, Column_Count},
+ Flex: {Flex, Flex_Basis, Flex_Grow, Flex_Shrink},
+ Flex_Flow: {Flex_Flow, Flex_Direction, Flex_Wrap},
+ Grid: {Grid, Grid_Template_Rows, Grid_Template_Columns, Grid_Template_Areas, Grid_Auto_Rows, Grid_Auto_Columns, Grid_Auto_Flow, Grid_Column_Gap, Grid_Row_Gap, Column_Gap, Row_Gap},
+ Grid_Area: {Grid_Area, Grid_Row_Start, Grid_Column_Start, Grid_Row_End, Grid_Column_End},
+ Grid_Row: {Grid_Row, Grid_Row_Start, Grid_Row_End},
+ Grid_Column: {Grid_Column, Grid_Column_Start, Grid_Column_End},
+ Grid_Template: {Grid_Template, Grid_Template_Rows, Grid_Template_Columns, Grid_Template_Areas},
+ List_Style: {List_Style, List_Style_Image, List_Style_Position, List_Style_Type},
+ Offset: {Offset, Offset_Position, Offset_Path, Offset_Distance, Offset_Anchor, Offset_Rotate},
+ Outline: {Outline, Outline_Width, Outline_Style, Outline_Color},
+ Overflow: {Overflow, Overflow_X, Overflow_Y},
+ Place_Content: {Place_Content, Align_Content, Justify_Content},
+ Place_Items: {Place_Items, Align_Items, Justify_Items},
+ Place_Self: {Place_Self, Align_Self, Justify_Self},
+ Text_Decoration: {Text_Decoration, Text_Decoration_Color, Text_Decoration_Color, Text_Decoration_Line, Text_Decoration_Thickness},
+ Transition: {Transition, Transition_Property, Transition_Duration, Transition_Timing_Function, Transition_Delay},
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/css/util.go b/vendor/github.com/tdewolff/minify/v2/css/util.go
new file mode 100644
index 0000000..7325aca
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/css/util.go
@@ -0,0 +1,55 @@
+package css
+
+import (
+ "encoding/hex"
+
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/css"
+)
+
+func removeMarkupNewlines(data []byte) []byte {
+ // remove any \\\r\n \\\r \\\n
+ for i := 1; i < len(data)-2; i++ {
+ if data[i] == '\\' && (data[i+1] == '\n' || data[i+1] == '\r') {
+ // encountered first replacee, now start to move bytes to the front
+ j := i + 2
+ if data[i+1] == '\r' && len(data) > i+2 && data[i+2] == '\n' {
+ j++
+ }
+ for ; j < len(data); j++ {
+ if data[j] == '\\' && len(data) > j+1 && (data[j+1] == '\n' || data[j+1] == '\r') {
+ if data[j+1] == '\r' && len(data) > j+2 && data[j+2] == '\n' {
+ j++
+ }
+ j++
+ } else {
+ data[i] = data[j]
+ i++
+ }
+ }
+ data = data[:i]
+ break
+ }
+ }
+ return data
+}
+
+func rgbToToken(r, g, b float64) Token {
+ // r, g, b are in interval [0.0, 1.0]
+ rgb := []byte{byte((r * 255.0) + 0.5), byte((g * 255.0) + 0.5), byte((b * 255.0) + 0.5)}
+
+ val := make([]byte, 7)
+ val[0] = '#'
+ hex.Encode(val[1:], rgb)
+ parse.ToLower(val)
+ if s, ok := ShortenColorHex[string(val[:7])]; ok {
+ return Token{css.IdentToken, s, nil, 0, 0}
+ } else if val[1] == val[2] && val[3] == val[4] && val[5] == val[6] {
+ val[2] = val[3]
+ val[3] = val[5]
+ val = val[:4]
+ } else {
+ val = val[:7]
+ }
+ return Token{css.HashToken, val, nil, 0, 0}
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/html/buffer.go b/vendor/github.com/tdewolff/minify/v2/html/buffer.go
new file mode 100644
index 0000000..f58367b
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/html/buffer.go
@@ -0,0 +1,137 @@
+package html
+
+import (
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/html"
+)
+
+// Token is a single token unit with an attribute value (if given) and hash of the data.
+type Token struct {
+ html.TokenType
+ Hash Hash
+ Data []byte
+ Text []byte
+ AttrVal []byte
+ Traits traits
+ Offset int
+}
+
+// TokenBuffer is a buffer that allows for token look-ahead.
+type TokenBuffer struct {
+ r *parse.Input
+ l *html.Lexer
+
+ buf []Token
+ pos int
+
+ attrBuffer []*Token
+}
+
+// NewTokenBuffer returns a new TokenBuffer.
+func NewTokenBuffer(r *parse.Input, l *html.Lexer) *TokenBuffer {
+ return &TokenBuffer{
+ r: r,
+ l: l,
+ buf: make([]Token, 0, 8),
+ }
+}
+
+func (z *TokenBuffer) read(t *Token) {
+ t.Offset = z.r.Offset()
+ t.TokenType, t.Data = z.l.Next()
+ t.Text = z.l.Text()
+ if t.TokenType == html.AttributeToken {
+ t.Offset += 1 + len(t.Text) + 1
+ t.AttrVal = z.l.AttrVal()
+ if len(t.AttrVal) > 1 && (t.AttrVal[0] == '"' || t.AttrVal[0] == '\'') {
+ t.Offset++
+ t.AttrVal = t.AttrVal[1 : len(t.AttrVal)-1] // quotes will be readded in attribute loop if necessary
+ }
+ t.Hash = ToHash(t.Text)
+ t.Traits = attrMap[t.Hash]
+ } else if t.TokenType == html.StartTagToken || t.TokenType == html.EndTagToken {
+ t.AttrVal = nil
+ t.Hash = ToHash(t.Text)
+ t.Traits = tagMap[t.Hash] // zero if not exist
+ } else {
+ t.AttrVal = nil
+ t.Hash = 0
+ t.Traits = 0
+ }
+}
+
+// Peek returns the ith element and possibly does an allocation.
+// Peeking past an error will panic.
+func (z *TokenBuffer) Peek(pos int) *Token {
+ pos += z.pos
+ if pos >= len(z.buf) {
+ if len(z.buf) > 0 && z.buf[len(z.buf)-1].TokenType == html.ErrorToken {
+ return &z.buf[len(z.buf)-1]
+ }
+
+ c := cap(z.buf)
+ d := len(z.buf) - z.pos
+ p := pos - z.pos + 1 // required peek length
+ var buf []Token
+ if 2*p > c {
+ buf = make([]Token, 0, 2*c+p)
+ } else {
+ buf = z.buf
+ }
+ copy(buf[:d], z.buf[z.pos:])
+
+ buf = buf[:p]
+ pos -= z.pos
+ for i := d; i < p; i++ {
+ z.read(&buf[i])
+ if buf[i].TokenType == html.ErrorToken {
+ buf = buf[:i+1]
+ pos = i
+ break
+ }
+ }
+ z.pos, z.buf = 0, buf
+ }
+ return &z.buf[pos]
+}
+
+// Shift returns the first element and advances position.
+func (z *TokenBuffer) Shift() *Token {
+ if z.pos >= len(z.buf) {
+ t := &z.buf[:1][0]
+ z.read(t)
+ return t
+ }
+ t := &z.buf[z.pos]
+ z.pos++
+ return t
+}
+
+// Attributes extracts the gives attribute hashes from a tag.
+// It returns in the same order pointers to the requested token data or nil.
+func (z *TokenBuffer) Attributes(hashes ...Hash) []*Token {
+ n := 0
+ for {
+ if t := z.Peek(n); t.TokenType != html.AttributeToken {
+ break
+ }
+ n++
+ }
+ if len(hashes) > cap(z.attrBuffer) {
+ z.attrBuffer = make([]*Token, len(hashes))
+ } else {
+ z.attrBuffer = z.attrBuffer[:len(hashes)]
+ for i := range z.attrBuffer {
+ z.attrBuffer[i] = nil
+ }
+ }
+ for i := z.pos; i < z.pos+n; i++ {
+ attr := &z.buf[i]
+ for j, hash := range hashes {
+ if hash == attr.Hash {
+ z.attrBuffer[j] = attr
+ }
+ }
+ }
+ return z.attrBuffer
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/html/hash.go b/vendor/github.com/tdewolff/minify/v2/html/hash.go
new file mode 100644
index 0000000..3b91cbb
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/html/hash.go
@@ -0,0 +1,543 @@
+package html
+
+// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
+
+// uses github.com/tdewolff/hasher
+//go:generate hasher -type=Hash -file=hash.go
+
+// Hash defines perfect hashes for a predefined list of strings
+type Hash uint32
+
+// Unique hash definitions to be used instead of strings
+const (
+ A Hash = 0x1 // a
+ Abbr Hash = 0x37a04 // abbr
+ About Hash = 0x5 // about
+ Accept Hash = 0x1106 // accept
+ Accept_Charset Hash = 0x110e // accept-charset
+ Action Hash = 0x23f06 // action
+ Address Hash = 0x5a07 // address
+ Align Hash = 0x32705 // align
+ Alink Hash = 0x7005 // alink
+ Allowfullscreen Hash = 0x2ad0f // allowfullscreen
+ Amp_Boilerplate Hash = 0x610f // amp-boilerplate
+ Area Hash = 0x1e304 // area
+ Article Hash = 0x2707 // article
+ Aside Hash = 0xb405 // aside
+ Async Hash = 0xac05 // async
+ Audio Hash = 0xd105 // audio
+ Autofocus Hash = 0xe409 // autofocus
+ Autoplay Hash = 0x10808 // autoplay
+ Axis Hash = 0x11004 // axis
+ B Hash = 0x101 // b
+ Background Hash = 0x300a // background
+ Base Hash = 0x19604 // base
+ Bb Hash = 0x37b02 // bb
+ Bdi Hash = 0x7503 // bdi
+ Bdo Hash = 0x31f03 // bdo
+ Bgcolor Hash = 0x12607 // bgcolor
+ Blockquote Hash = 0x13e0a // blockquote
+ Body Hash = 0xd04 // body
+ Br Hash = 0x37c02 // br
+ Button Hash = 0x14806 // button
+ Canvas Hash = 0xb006 // canvas
+ Caption Hash = 0x21f07 // caption
+ Charset Hash = 0x1807 // charset
+ Checked Hash = 0x1b307 // checked
+ Cite Hash = 0xfb04 // cite
+ Class Hash = 0x15905 // class
+ Classid Hash = 0x15907 // classid
+ Clear Hash = 0x2b05 // clear
+ Code Hash = 0x19204 // code
+ Codebase Hash = 0x19208 // codebase
+ Codetype Hash = 0x1a408 // codetype
+ Col Hash = 0x12803 // col
+ Colgroup Hash = 0x1bb08 // colgroup
+ Color Hash = 0x12805 // color
+ Cols Hash = 0x1cf04 // cols
+ Colspan Hash = 0x1cf07 // colspan
+ Compact Hash = 0x1ec07 // compact
+ Content Hash = 0x28407 // content
+ Controls Hash = 0x20108 // controls
+ Data Hash = 0x1f04 // data
+ Datalist Hash = 0x1f08 // datalist
+ Datatype Hash = 0x4d08 // datatype
+ Dd Hash = 0x5b02 // dd
+ Declare Hash = 0xb707 // declare
+ Default Hash = 0x7f07 // default
+ DefaultChecked Hash = 0x1730e // defaultChecked
+ DefaultMuted Hash = 0x7f0c // defaultMuted
+ DefaultSelected Hash = 0x8a0f // defaultSelected
+ Defer Hash = 0x9805 // defer
+ Del Hash = 0x10503 // del
+ Details Hash = 0x15f07 // details
+ Dfn Hash = 0x16c03 // dfn
+ Dialog Hash = 0xa606 // dialog
+ Dir Hash = 0x7603 // dir
+ Disabled Hash = 0x18008 // disabled
+ Div Hash = 0x18703 // div
+ Dl Hash = 0x1b902 // dl
+ Dt Hash = 0x23102 // dt
+ Em Hash = 0x4302 // em
+ Embed Hash = 0x4905 // embed
+ Enabled Hash = 0x26c07 // enabled
+ Enctype Hash = 0x1fa07 // enctype
+ Face Hash = 0x5604 // face
+ Fieldset Hash = 0x21408 // fieldset
+ Figcaption Hash = 0x21c0a // figcaption
+ Figure Hash = 0x22606 // figure
+ Footer Hash = 0xdb06 // footer
+ For Hash = 0x23b03 // for
+ Form Hash = 0x23b04 // form
+ Formaction Hash = 0x23b0a // formaction
+ Formnovalidate Hash = 0x2450e // formnovalidate
+ Frame Hash = 0x28c05 // frame
+ Frameborder Hash = 0x28c0b // frameborder
+ H1 Hash = 0x2e002 // h1
+ H2 Hash = 0x25302 // h2
+ H3 Hash = 0x25502 // h3
+ H4 Hash = 0x25702 // h4
+ H5 Hash = 0x25902 // h5
+ H6 Hash = 0x25b02 // h6
+ Head Hash = 0x2d204 // head
+ Header Hash = 0x2d206 // header
+ Hgroup Hash = 0x25d06 // hgroup
+ Hidden Hash = 0x26806 // hidden
+ Hr Hash = 0x32d02 // hr
+ Href Hash = 0x32d04 // href
+ Hreflang Hash = 0x32d08 // hreflang
+ Html Hash = 0x27304 // html
+ Http_Equiv Hash = 0x2770a // http-equiv
+ I Hash = 0x2401 // i
+ Icon Hash = 0x28304 // icon
+ Id Hash = 0xb602 // id
+ Iframe Hash = 0x28b06 // iframe
+ Img Hash = 0x29703 // img
+ Inert Hash = 0xf605 // inert
+ Inlist Hash = 0x29a06 // inlist
+ Input Hash = 0x2a405 // input
+ Ins Hash = 0x2a903 // ins
+ Ismap Hash = 0x11205 // ismap
+ Itemscope Hash = 0xfc09 // itemscope
+ Kbd Hash = 0x7403 // kbd
+ Keygen Hash = 0x1f606 // keygen
+ Label Hash = 0xbe05 // label
+ Lang Hash = 0x33104 // lang
+ Language Hash = 0x33108 // language
+ Legend Hash = 0x2c506 // legend
+ Li Hash = 0x2302 // li
+ Link Hash = 0x7104 // link
+ Longdesc Hash = 0xc208 // longdesc
+ Main Hash = 0xf404 // main
+ Manifest Hash = 0x2bc08 // manifest
+ Map Hash = 0xee03 // map
+ Mark Hash = 0x2cb04 // mark
+ Math Hash = 0x2cf04 // math
+ Max Hash = 0x2d803 // max
+ Maxlength Hash = 0x2d809 // maxlength
+ Media Hash = 0xa405 // media
+ Menu Hash = 0x12204 // menu
+ Meta Hash = 0x2e204 // meta
+ Meter Hash = 0x2f705 // meter
+ Method Hash = 0x2fc06 // method
+ Multiple Hash = 0x30208 // multiple
+ Muted Hash = 0x30a05 // muted
+ Name Hash = 0xa204 // name
+ Nav Hash = 0x32403 // nav
+ Nohref Hash = 0x32b06 // nohref
+ Noresize Hash = 0x13608 // noresize
+ Noscript Hash = 0x14d08 // noscript
+ Noshade Hash = 0x16e07 // noshade
+ Novalidate Hash = 0x2490a // novalidate
+ Nowrap Hash = 0x1d506 // nowrap
+ Object Hash = 0xd506 // object
+ Ol Hash = 0xcb02 // ol
+ Open Hash = 0x32104 // open
+ Optgroup Hash = 0x35608 // optgroup
+ Option Hash = 0x30f06 // option
+ Output Hash = 0x206 // output
+ P Hash = 0x501 // p
+ Param Hash = 0xf005 // param
+ Pauseonexit Hash = 0x1160b // pauseonexit
+ Picture Hash = 0x1c207 // picture
+ Plaintext Hash = 0x1da09 // plaintext
+ Poster Hash = 0x26206 // poster
+ Pre Hash = 0x35d03 // pre
+ Prefix Hash = 0x35d06 // prefix
+ Profile Hash = 0x36407 // profile
+ Progress Hash = 0x34208 // progress
+ Property Hash = 0x31508 // property
+ Q Hash = 0x14301 // q
+ Rb Hash = 0x2f02 // rb
+ Readonly Hash = 0x1e408 // readonly
+ Rel Hash = 0xbc03 // rel
+ Required Hash = 0x22a08 // required
+ Resource Hash = 0x1c708 // resource
+ Rev Hash = 0x7803 // rev
+ Reversed Hash = 0x7808 // reversed
+ Rows Hash = 0x9c04 // rows
+ Rowspan Hash = 0x9c07 // rowspan
+ Rp Hash = 0x6a02 // rp
+ Rt Hash = 0x2802 // rt
+ Rtc Hash = 0xf903 // rtc
+ Ruby Hash = 0xe004 // ruby
+ Rules Hash = 0x12c05 // rules
+ S Hash = 0x1c01 // s
+ Samp Hash = 0x6004 // samp
+ Scope Hash = 0x10005 // scope
+ Scoped Hash = 0x10006 // scoped
+ Script Hash = 0x14f06 // script
+ Scrolling Hash = 0xc809 // scrolling
+ Seamless Hash = 0x19808 // seamless
+ Section Hash = 0x13007 // section
+ Select Hash = 0x16506 // select
+ Selected Hash = 0x16508 // selected
+ Shape Hash = 0x19f05 // shape
+ Size Hash = 0x13a04 // size
+ Slot Hash = 0x20804 // slot
+ Small Hash = 0x2ab05 // small
+ Sortable Hash = 0x2ef08 // sortable
+ Source Hash = 0x1c906 // source
+ Span Hash = 0x9f04 // span
+ Src Hash = 0x34903 // src
+ Srcset Hash = 0x34906 // srcset
+ Start Hash = 0x2505 // start
+ Strong Hash = 0x29e06 // strong
+ Style Hash = 0x2c205 // style
+ Sub Hash = 0x31d03 // sub
+ Summary Hash = 0x33907 // summary
+ Sup Hash = 0x34003 // sup
+ Svg Hash = 0x34f03 // svg
+ Tabindex Hash = 0x2e408 // tabindex
+ Table Hash = 0x2f205 // table
+ Target Hash = 0x706 // target
+ Tbody Hash = 0xc05 // tbody
+ Td Hash = 0x1e02 // td
+ Template Hash = 0x4208 // template
+ Text Hash = 0x1df04 // text
+ Textarea Hash = 0x1df08 // textarea
+ Tfoot Hash = 0xda05 // tfoot
+ Th Hash = 0x2d102 // th
+ Thead Hash = 0x2d105 // thead
+ Time Hash = 0x12004 // time
+ Title Hash = 0x15405 // title
+ Tr Hash = 0x1f202 // tr
+ Track Hash = 0x1f205 // track
+ Translate Hash = 0x20b09 // translate
+ Truespeed Hash = 0x23209 // truespeed
+ Type Hash = 0x5104 // type
+ Typemustmatch Hash = 0x1a80d // typemustmatch
+ Typeof Hash = 0x5106 // typeof
+ U Hash = 0x301 // u
+ Ul Hash = 0x8302 // ul
+ Undeterminate Hash = 0x370d // undeterminate
+ Usemap Hash = 0xeb06 // usemap
+ Valign Hash = 0x32606 // valign
+ Value Hash = 0x18905 // value
+ Valuetype Hash = 0x18909 // valuetype
+ Var Hash = 0x28003 // var
+ Video Hash = 0x35205 // video
+ Visible Hash = 0x36b07 // visible
+ Vlink Hash = 0x37205 // vlink
+ Vocab Hash = 0x37705 // vocab
+ Wbr Hash = 0x37e03 // wbr
+ Xmlns Hash = 0x2eb05 // xmlns
+ Xmp Hash = 0x36203 // xmp
+)
+
+// String returns the hash' name.
+func (i Hash) String() string {
+ start := uint32(i >> 8)
+ n := uint32(i & 0xff)
+ if start+n > uint32(len(_Hash_text)) {
+ return ""
+ }
+ return _Hash_text[start : start+n]
+}
+
+// ToHash returns the hash whose name is s. It returns zero if there is no
+// such hash. It is case sensitive.
+func ToHash(s []byte) Hash {
+ if len(s) == 0 || len(s) > _Hash_maxLen {
+ return 0
+ }
+ h := uint32(_Hash_hash0)
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+ t := _Hash_text[i>>8 : i>>8+i&0xff]
+ for i := 0; i < len(s); i++ {
+ if t[i] != s[i] {
+ goto NEXT
+ }
+ }
+ return i
+ }
+NEXT:
+ if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+ t := _Hash_text[i>>8 : i>>8+i&0xff]
+ for i := 0; i < len(s); i++ {
+ if t[i] != s[i] {
+ return 0
+ }
+ }
+ return i
+ }
+ return 0
+}
+
+const _Hash_hash0 = 0x9acb0442
+const _Hash_maxLen = 15
+const _Hash_text = "aboutputargetbodyaccept-charsetdatalistarticlearbackgroundet" +
+ "erminatemplatembedatatypeofaceaddressamp-boilerplatealinkbdi" +
+ "reversedefaultMutedefaultSelectedeferowspanamedialogasyncanv" +
+ "asideclarelabelongdescrollingaudiobjectfooterubyautofocusema" +
+ "paramainertcitemscopedelautoplayaxismapauseonexitimenubgcolo" +
+ "rulesectionoresizeblockquotebuttonoscriptitleclassidetailsel" +
+ "ectedfnoshadefaultCheckedisabledivaluetypecodebaseamlesshape" +
+ "codetypemustmatcheckedlcolgroupicturesourcecolspanowraplaint" +
+ "extareadonlycompactrackeygenctypecontrolslotranslatefieldset" +
+ "figcaptionfigurequiredtruespeedformactionformnovalidateh2h3h" +
+ "4h5h6hgrouposterhiddenabledhtmlhttp-equivaricontentiframebor" +
+ "derimginlistronginputinsmallowfullscreenmanifestylegendmarkm" +
+ "atheadermaxlength1metabindexmlnsortablemetermethodmultiplemu" +
+ "tedoptionpropertysubdopenavalignohreflanguagesummarysuprogre" +
+ "ssrcsetsvgvideoptgrouprefixmprofilevisiblevlinkvocabbrwbr"
+
+var _Hash_table = [1 << 9]Hash{
+ 0x0: 0x1df08, // textarea
+ 0x4: 0x32d02, // hr
+ 0x8: 0x1c207, // picture
+ 0xb: 0x18905, // value
+ 0xf: 0x2e408, // tabindex
+ 0x12: 0x15905, // class
+ 0x15: 0x37e03, // wbr
+ 0x18: 0x1a80d, // typemustmatch
+ 0x1a: 0x1b902, // dl
+ 0x1d: 0xf903, // rtc
+ 0x1e: 0x25702, // h4
+ 0x22: 0x2ef08, // sortable
+ 0x24: 0x4208, // template
+ 0x25: 0x28c0b, // frameborder
+ 0x28: 0x37a04, // abbr
+ 0x29: 0x28b06, // iframe
+ 0x2a: 0x610f, // amp-boilerplate
+ 0x2c: 0x1e408, // readonly
+ 0x30: 0x23f06, // action
+ 0x33: 0x28c05, // frame
+ 0x35: 0x12c05, // rules
+ 0x36: 0x30208, // multiple
+ 0x38: 0x31f03, // bdo
+ 0x39: 0x1d506, // nowrap
+ 0x3e: 0x21408, // fieldset
+ 0x3f: 0x7503, // bdi
+ 0x46: 0x7f0c, // defaultMuted
+ 0x49: 0x35205, // video
+ 0x4c: 0x19808, // seamless
+ 0x4d: 0x13608, // noresize
+ 0x4f: 0xb602, // id
+ 0x51: 0x25d06, // hgroup
+ 0x52: 0x23102, // dt
+ 0x55: 0x12805, // color
+ 0x56: 0x34003, // sup
+ 0x59: 0x370d, // undeterminate
+ 0x5a: 0x35608, // optgroup
+ 0x5b: 0x2d206, // header
+ 0x5c: 0xb405, // aside
+ 0x5f: 0x10005, // scope
+ 0x60: 0x101, // b
+ 0x61: 0xcb02, // ol
+ 0x64: 0x32b06, // nohref
+ 0x65: 0x1da09, // plaintext
+ 0x66: 0x20804, // slot
+ 0x67: 0x11004, // axis
+ 0x68: 0x12803, // col
+ 0x69: 0x32606, // valign
+ 0x6c: 0x2d105, // thead
+ 0x70: 0x34906, // srcset
+ 0x71: 0x26806, // hidden
+ 0x76: 0x1bb08, // colgroup
+ 0x78: 0x34f03, // svg
+ 0x7b: 0x2cb04, // mark
+ 0x7e: 0x33104, // lang
+ 0x81: 0x1cf04, // cols
+ 0x86: 0x5a07, // address
+ 0x8b: 0xf404, // main
+ 0x8c: 0x4302, // em
+ 0x8f: 0x32d08, // hreflang
+ 0x93: 0x1b307, // checked
+ 0x94: 0x25902, // h5
+ 0x95: 0x301, // u
+ 0x96: 0x32705, // align
+ 0x97: 0x14301, // q
+ 0x99: 0xd506, // object
+ 0x9b: 0x28407, // content
+ 0x9d: 0xc809, // scrolling
+ 0x9f: 0x36407, // profile
+ 0xa0: 0x34903, // src
+ 0xa1: 0xda05, // tfoot
+ 0xa3: 0x2f705, // meter
+ 0xa4: 0x37705, // vocab
+ 0xa6: 0xd04, // body
+ 0xa8: 0x19204, // code
+ 0xac: 0x20108, // controls
+ 0xb0: 0x2ab05, // small
+ 0xb1: 0x18008, // disabled
+ 0xb5: 0x5604, // face
+ 0xb6: 0x501, // p
+ 0xb9: 0x2302, // li
+ 0xbb: 0xe409, // autofocus
+ 0xbf: 0x27304, // html
+ 0xc2: 0x4d08, // datatype
+ 0xc6: 0x35d06, // prefix
+ 0xcb: 0x35d03, // pre
+ 0xcc: 0x1106, // accept
+ 0xd1: 0x23b03, // for
+ 0xd5: 0x29e06, // strong
+ 0xd6: 0x9c07, // rowspan
+ 0xd7: 0x25502, // h3
+ 0xd8: 0x2cf04, // math
+ 0xde: 0x16e07, // noshade
+ 0xdf: 0x19f05, // shape
+ 0xe1: 0x10006, // scoped
+ 0xe3: 0x706, // target
+ 0xe6: 0x21c0a, // figcaption
+ 0xe9: 0x1df04, // text
+ 0xea: 0x1c708, // resource
+ 0xec: 0xee03, // map
+ 0xf0: 0x29a06, // inlist
+ 0xf1: 0x16506, // select
+ 0xf2: 0x1f606, // keygen
+ 0xf3: 0x5106, // typeof
+ 0xf6: 0xb006, // canvas
+ 0xf7: 0x30f06, // option
+ 0xf8: 0xbe05, // label
+ 0xf9: 0xbc03, // rel
+ 0xfb: 0x1f04, // data
+ 0xfd: 0x6004, // samp
+ 0x100: 0x110e, // accept-charset
+ 0x101: 0xeb06, // usemap
+ 0x103: 0x2bc08, // manifest
+ 0x109: 0xa204, // name
+ 0x10a: 0x14806, // button
+ 0x10b: 0x2b05, // clear
+ 0x10e: 0x33907, // summary
+ 0x10f: 0x2e204, // meta
+ 0x110: 0x33108, // language
+ 0x112: 0x300a, // background
+ 0x113: 0x2707, // article
+ 0x116: 0x23b0a, // formaction
+ 0x119: 0x1, // a
+ 0x11b: 0x5, // about
+ 0x11c: 0xfc09, // itemscope
+ 0x11e: 0x14d08, // noscript
+ 0x11f: 0x15907, // classid
+ 0x120: 0x36203, // xmp
+ 0x121: 0x19604, // base
+ 0x123: 0x1c01, // s
+ 0x124: 0x36b07, // visible
+ 0x126: 0x37b02, // bb
+ 0x127: 0x9c04, // rows
+ 0x12d: 0x2450e, // formnovalidate
+ 0x131: 0x1f205, // track
+ 0x135: 0x18703, // div
+ 0x136: 0xac05, // async
+ 0x137: 0x31508, // property
+ 0x13a: 0x16c03, // dfn
+ 0x13e: 0xf605, // inert
+ 0x142: 0x10503, // del
+ 0x144: 0x25302, // h2
+ 0x147: 0x2c205, // style
+ 0x149: 0x29703, // img
+ 0x14a: 0xc05, // tbody
+ 0x14b: 0x7603, // dir
+ 0x14c: 0x2eb05, // xmlns
+ 0x14e: 0x1f08, // datalist
+ 0x14f: 0x32d04, // href
+ 0x150: 0x1f202, // tr
+ 0x151: 0x13e0a, // blockquote
+ 0x152: 0x18909, // valuetype
+ 0x155: 0xdb06, // footer
+ 0x157: 0x14f06, // script
+ 0x158: 0x1cf07, // colspan
+ 0x15d: 0x1730e, // defaultChecked
+ 0x15f: 0x2490a, // novalidate
+ 0x164: 0x1a408, // codetype
+ 0x165: 0x2c506, // legend
+ 0x16b: 0x1160b, // pauseonexit
+ 0x16c: 0x21f07, // caption
+ 0x16f: 0x26c07, // enabled
+ 0x173: 0x26206, // poster
+ 0x175: 0x30a05, // muted
+ 0x176: 0x11205, // ismap
+ 0x178: 0x2a903, // ins
+ 0x17a: 0xe004, // ruby
+ 0x17b: 0x37c02, // br
+ 0x17c: 0x8a0f, // defaultSelected
+ 0x17d: 0x7403, // kbd
+ 0x17f: 0x1c906, // source
+ 0x182: 0x9f04, // span
+ 0x184: 0x2d803, // max
+ 0x18a: 0x5b02, // dd
+ 0x18b: 0x13a04, // size
+ 0x18c: 0xa405, // media
+ 0x18d: 0x19208, // codebase
+ 0x18f: 0x4905, // embed
+ 0x192: 0x5104, // type
+ 0x193: 0xf005, // param
+ 0x194: 0x25b02, // h6
+ 0x197: 0x28304, // icon
+ 0x198: 0x12607, // bgcolor
+ 0x199: 0x2ad0f, // allowfullscreen
+ 0x19a: 0x12004, // time
+ 0x19b: 0x7803, // rev
+ 0x19d: 0x34208, // progress
+ 0x19e: 0x22606, // figure
+ 0x1a0: 0x6a02, // rp
+ 0x1a2: 0xa606, // dialog
+ 0x1a4: 0x2802, // rt
+ 0x1a7: 0x1e304, // area
+ 0x1a8: 0x7808, // reversed
+ 0x1aa: 0x32104, // open
+ 0x1ac: 0x2d204, // head
+ 0x1ad: 0x7005, // alink
+ 0x1af: 0x28003, // var
+ 0x1b0: 0x15f07, // details
+ 0x1b1: 0x2401, // i
+ 0x1b3: 0x1e02, // td
+ 0x1b4: 0xb707, // declare
+ 0x1b5: 0x8302, // ul
+ 0x1ba: 0x2fc06, // method
+ 0x1bd: 0x13007, // section
+ 0x1be: 0x22a08, // required
+ 0x1c2: 0x9805, // defer
+ 0x1c3: 0x37205, // vlink
+ 0x1c4: 0x15405, // title
+ 0x1c5: 0x2770a, // http-equiv
+ 0x1c6: 0x1fa07, // enctype
+ 0x1c7: 0x1ec07, // compact
+ 0x1c8: 0x2d809, // maxlength
+ 0x1c9: 0x16508, // selected
+ 0x1cc: 0xd105, // audio
+ 0x1cd: 0xc208, // longdesc
+ 0x1d1: 0xfb04, // cite
+ 0x1da: 0x2505, // start
+ 0x1de: 0x2d102, // th
+ 0x1df: 0x10808, // autoplay
+ 0x1e2: 0x7104, // link
+ 0x1e3: 0x206, // output
+ 0x1e5: 0x12204, // menu
+ 0x1e6: 0x2a405, // input
+ 0x1eb: 0x32403, // nav
+ 0x1ec: 0x31d03, // sub
+ 0x1ee: 0x1807, // charset
+ 0x1ef: 0x7f07, // default
+ 0x1f3: 0x2f205, // table
+ 0x1f4: 0x23b04, // form
+ 0x1f5: 0x23209, // truespeed
+ 0x1f6: 0x2f02, // rb
+ 0x1fb: 0x20b09, // translate
+ 0x1fd: 0x2e002, // h1
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/html/html.go b/vendor/github.com/tdewolff/minify/v2/html/html.go
new file mode 100644
index 0000000..616a9ba
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/html/html.go
@@ -0,0 +1,514 @@
+// Package html minifies HTML5 following the specifications at http://www.w3.org/TR/html5/syntax.html.
+package html
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/buffer"
+ "github.com/tdewolff/parse/v2/html"
+)
+
+var (
+ gtBytes = []byte(">")
+ isBytes = []byte("=")
+ spaceBytes = []byte(" ")
+ doctypeBytes = []byte("<!doctype html>")
+ jsMimeBytes = []byte("application/javascript")
+ cssMimeBytes = []byte("text/css")
+ htmlMimeBytes = []byte("text/html")
+ svgMimeBytes = []byte("image/svg+xml")
+ formMimeBytes = []byte("application/x-www-form-urlencoded")
+ mathMimeBytes = []byte("application/mathml+xml")
+ dataSchemeBytes = []byte("data:")
+ jsSchemeBytes = []byte("javascript:")
+ httpBytes = []byte("http")
+ radioBytes = []byte("radio")
+ onBytes = []byte("on")
+ textBytes = []byte("text")
+ noneBytes = []byte("none")
+ submitBytes = []byte("submit")
+ allBytes = []byte("all")
+ rectBytes = []byte("rect")
+ dataBytes = []byte("data")
+ getBytes = []byte("get")
+ autoBytes = []byte("auto")
+ oneBytes = []byte("one")
+ inlineParams = map[string]string{"inline": "1"}
+)
+
+////////////////////////////////////////////////////////////////
+
+// Minifier is an HTML minifier.
+type Minifier struct {
+ KeepComments bool
+ KeepConditionalComments bool
+ KeepDefaultAttrVals bool
+ KeepDocumentTags bool
+ KeepEndTags bool
+ KeepQuotes bool
+ KeepWhitespace bool
+}
+
+// Minify minifies HTML data, it reads from r and writes to w.
+func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ return (&Minifier{}).Minify(m, w, r, params)
+}
+
+// Minify minifies HTML data, it reads from r and writes to w.
+func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
+ var rawTagHash Hash
+ var rawTagMediatype []byte
+
+ omitSpace := true // if true the next leading space is omitted
+ inPre := false
+
+ attrMinifyBuffer := buffer.NewWriter(make([]byte, 0, 64))
+ attrByteBuffer := make([]byte, 0, 64)
+
+ z := parse.NewInput(r)
+ defer z.Restore()
+
+ l := html.NewLexer(z)
+ tb := NewTokenBuffer(z, l)
+ for {
+ t := *tb.Shift()
+ switch t.TokenType {
+ case html.ErrorToken:
+ if _, err := w.Write(nil); err != nil {
+ return err
+ }
+ if l.Err() == io.EOF {
+ return nil
+ }
+ return l.Err()
+ case html.DoctypeToken:
+ w.Write(doctypeBytes)
+ case html.CommentToken:
+ if o.KeepComments {
+ w.Write(t.Data)
+ } else if o.KeepConditionalComments && 6 < len(t.Text) && (bytes.HasPrefix(t.Text, []byte("[if ")) || bytes.HasSuffix(t.Text, []byte("[endif]")) || bytes.HasSuffix(t.Text, []byte("[endif]--"))) {
+ // [if ...] is always 7 or more characters, [endif] is only encountered for downlevel-revealed
+ // see https://msdn.microsoft.com/en-us/library/ms537512(v=vs.85).aspx#syntax
+ if bytes.HasPrefix(t.Data, []byte("<!--[if ")) && bytes.HasSuffix(t.Data, []byte("<![endif]-->")) { // downlevel-hidden
+ begin := bytes.IndexByte(t.Data, '>') + 1
+ end := len(t.Data) - len("<![endif]-->")
+ w.Write(t.Data[:begin])
+ if err := o.Minify(m, w, buffer.NewReader(t.Data[begin:end]), nil); err != nil {
+ return minify.UpdateErrorPosition(err, z, t.Offset)
+ }
+ w.Write(t.Data[end:])
+ } else {
+ w.Write(t.Data) // downlevel-revealed or short downlevel-hidden
+ }
+ } else if 1 < len(t.Text) && t.Text[0] == '#' {
+ // SSI tags
+ w.Write(t.Data)
+ }
+ case html.SvgToken:
+ if err := m.MinifyMimetype(svgMimeBytes, w, buffer.NewReader(t.Data), nil); err != nil {
+ if err != minify.ErrNotExist {
+ return minify.UpdateErrorPosition(err, z, t.Offset)
+ }
+ w.Write(t.Data)
+ }
+ case html.MathToken:
+ if err := m.MinifyMimetype(mathMimeBytes, w, buffer.NewReader(t.Data), nil); err != nil {
+ if err != minify.ErrNotExist {
+ return minify.UpdateErrorPosition(err, z, t.Offset)
+ }
+ w.Write(t.Data)
+ }
+ case html.TextToken:
+ // CSS and JS minifiers for inline code
+ if rawTagHash != 0 {
+ if rawTagHash == Style || rawTagHash == Script || rawTagHash == Iframe {
+ var mimetype []byte
+ var params map[string]string
+ if rawTagHash == Iframe {
+ mimetype = htmlMimeBytes
+ } else if len(rawTagMediatype) > 0 {
+ mimetype, params = parse.Mediatype(rawTagMediatype)
+ } else if rawTagHash == Script {
+ mimetype = jsMimeBytes
+ } else if rawTagHash == Style {
+ mimetype = cssMimeBytes
+ }
+ if err := m.MinifyMimetype(mimetype, w, buffer.NewReader(t.Data), params); err != nil {
+ if err != minify.ErrNotExist {
+ return minify.UpdateErrorPosition(err, z, t.Offset)
+ }
+ w.Write(t.Data)
+ }
+ } else {
+ w.Write(t.Data)
+ }
+ } else if inPre {
+ w.Write(t.Data)
+ } else {
+ t.Data = parse.ReplaceMultipleWhitespaceAndEntities(t.Data, EntitiesMap, TextRevEntitiesMap)
+
+ // whitespace removal; trim left
+ if omitSpace && parse.IsWhitespace(t.Data[0]) {
+ t.Data = t.Data[1:]
+ }
+
+ // whitespace removal; trim right
+ omitSpace = false
+ if len(t.Data) == 0 {
+ omitSpace = true
+ } else if parse.IsWhitespace(t.Data[len(t.Data)-1]) {
+ omitSpace = true
+ i := 0
+ for {
+ next := tb.Peek(i)
+ // trim if EOF, text token with leading whitespace or block token
+ if next.TokenType == html.ErrorToken {
+ t.Data = t.Data[:len(t.Data)-1]
+ omitSpace = false
+ break
+ } else if next.TokenType == html.TextToken {
+ // this only happens when a comment, doctype or phrasing end tag (only for !o.KeepWhitespace) was in between
+ // remove if the text token starts with a whitespace
+ if len(next.Data) > 0 && parse.IsWhitespace(next.Data[0]) {
+ t.Data = t.Data[:len(t.Data)-1]
+ omitSpace = false
+ }
+ break
+ } else if next.TokenType == html.StartTagToken || next.TokenType == html.EndTagToken {
+ if o.KeepWhitespace {
+ break
+ }
+ // remove when followed up by a block tag
+ if next.Traits&nonPhrasingTag != 0 {
+ t.Data = t.Data[:len(t.Data)-1]
+ omitSpace = false
+ break
+ } else if next.TokenType == html.StartTagToken {
+ break
+ }
+ }
+ i++
+ }
+ }
+
+ w.Write(t.Data)
+ }
+ case html.StartTagToken, html.EndTagToken:
+ rawTagHash = 0
+ hasAttributes := false
+ if t.TokenType == html.StartTagToken {
+ if next := tb.Peek(0); next.TokenType == html.AttributeToken {
+ hasAttributes = true
+ }
+ if t.Traits&rawTag != 0 {
+ // ignore empty script and style tags
+ if !hasAttributes && (t.Hash == Script || t.Hash == Style) {
+ if next := tb.Peek(1); next.TokenType == html.EndTagToken {
+ tb.Shift()
+ tb.Shift()
+ break
+ }
+ }
+ rawTagHash = t.Hash
+ rawTagMediatype = nil
+
+ // do not minify content of <style amp-boilerplate>
+ if hasAttributes && t.Hash == Style {
+ if attrs := tb.Attributes(Amp_Boilerplate); attrs[0] != nil {
+ rawTagHash = 0
+ }
+ }
+ }
+ } else if t.Hash == Template {
+ omitSpace = true // EndTagToken
+ }
+
+ if t.Hash == Pre {
+ inPre = t.TokenType == html.StartTagToken
+ }
+
+ // remove superfluous tags, except for html, head and body tags when KeepDocumentTags is set
+ if !hasAttributes && (!o.KeepDocumentTags && (t.Hash == Html || t.Hash == Head || t.Hash == Body) || t.Hash == Colgroup) {
+ break
+ } else if t.TokenType == html.EndTagToken {
+ omitEndTag := false
+ if !o.KeepEndTags {
+ if t.Hash == Thead || t.Hash == Tbody || t.Hash == Tfoot || t.Hash == Tr || t.Hash == Th ||
+ t.Hash == Td || t.Hash == Option || t.Hash == Dd || t.Hash == Dt || t.Hash == Li ||
+ t.Hash == Rb || t.Hash == Rt || t.Hash == Rtc || t.Hash == Rp {
+ omitEndTag = true // omit end tags
+ } else if t.Hash == P {
+ i := 0
+ for {
+ next := tb.Peek(i)
+ i++
+ // continue if text token is empty or whitespace
+ if next.TokenType == html.TextToken && parse.IsAllWhitespace(next.Data) {
+ continue
+ }
+ if next.TokenType == html.ErrorToken || next.TokenType == html.EndTagToken && next.Traits&keepPTag == 0 || next.TokenType == html.StartTagToken && next.Traits&omitPTag != 0 {
+ omitEndTag = true // omit p end tag
+ }
+ break
+ }
+ } else if t.Hash == Optgroup {
+ i := 0
+ for {
+ next := tb.Peek(i)
+ i++
+ // continue if text token
+ if next.TokenType == html.TextToken {
+ continue
+ }
+ if next.TokenType == html.ErrorToken || next.Hash != Option {
+ omitEndTag = true // omit optgroup end tag
+ }
+ break
+ }
+ }
+ }
+
+ if t.Traits&nonPhrasingTag != 0 {
+ omitSpace = true // omit spaces after block elements
+ } else if o.KeepWhitespace || t.Traits&objectTag != 0 {
+ omitSpace = false
+ }
+
+ if !omitEndTag {
+ if len(t.Data) > 3+len(t.Text) {
+ t.Data[2+len(t.Text)] = '>'
+ t.Data = t.Data[:3+len(t.Text)]
+ }
+ w.Write(t.Data)
+ }
+
+ // skip text in select and optgroup tags
+ if t.Hash == Option || t.Hash == Optgroup {
+ if next := tb.Peek(0); next.TokenType == html.TextToken {
+ tb.Shift()
+ }
+ }
+ break
+ }
+
+ if o.KeepWhitespace || t.Traits&objectTag != 0 {
+ omitSpace = false
+ } else if t.Traits&nonPhrasingTag != 0 {
+ omitSpace = true // omit spaces after block elements
+ }
+
+ w.Write(t.Data)
+
+ if hasAttributes {
+ if t.Hash == Meta {
+ attrs := tb.Attributes(Content, Http_Equiv, Charset, Name)
+ if content := attrs[0]; content != nil {
+ if httpEquiv := attrs[1]; httpEquiv != nil {
+ httpEquiv.AttrVal = parse.TrimWhitespace(httpEquiv.AttrVal)
+ if charset := attrs[2]; charset == nil && parse.EqualFold(httpEquiv.AttrVal, []byte("content-type")) {
+ content.AttrVal = minify.Mediatype(content.AttrVal)
+ if bytes.Equal(content.AttrVal, []byte("text/html;charset=utf-8")) {
+ httpEquiv.Text = nil
+ content.Text = []byte("charset")
+ content.Hash = Charset
+ content.AttrVal = []byte("utf-8")
+ }
+ }
+ }
+ if name := attrs[3]; name != nil {
+ name.AttrVal = parse.TrimWhitespace(name.AttrVal)
+ if parse.EqualFold(name.AttrVal, []byte("keywords")) {
+ content.AttrVal = bytes.ReplaceAll(content.AttrVal, []byte(", "), []byte(","))
+ } else if parse.EqualFold(name.AttrVal, []byte("viewport")) {
+ content.AttrVal = bytes.ReplaceAll(content.AttrVal, []byte(" "), []byte(""))
+ for i := 0; i < len(content.AttrVal); i++ {
+ if content.AttrVal[i] == '=' && i+2 < len(content.AttrVal) {
+ i++
+ if n := parse.Number(content.AttrVal[i:]); n > 0 {
+ minNum := minify.Number(content.AttrVal[i:i+n], -1)
+ if len(minNum) < n {
+ copy(content.AttrVal[i:i+len(minNum)], minNum)
+ copy(content.AttrVal[i+len(minNum):], content.AttrVal[i+n:])
+ content.AttrVal = content.AttrVal[:len(content.AttrVal)+len(minNum)-n]
+ }
+ i += len(minNum)
+ }
+ i-- // mitigate for-loop increase
+ }
+ }
+ }
+ }
+ }
+ } else if t.Hash == Script {
+ attrs := tb.Attributes(Src, Charset)
+ if attrs[0] != nil && attrs[1] != nil {
+ attrs[1].Text = nil
+ }
+ } else if t.Hash == Input {
+ attrs := tb.Attributes(Type, Value)
+ if t, value := attrs[0], attrs[1]; t != nil && value != nil {
+ isRadio := parse.EqualFold(t.AttrVal, radioBytes)
+ if !isRadio && len(value.AttrVal) == 0 {
+ value.Text = nil
+ } else if isRadio && parse.EqualFold(value.AttrVal, onBytes) {
+ value.Text = nil
+ }
+ }
+ } else if t.Hash == A {
+ attrs := tb.Attributes(Id, Name)
+ if id, name := attrs[0], attrs[1]; id != nil && name != nil {
+ if bytes.Equal(id.AttrVal, name.AttrVal) {
+ name.Text = nil
+ }
+ }
+ }
+
+ // write attributes
+ for {
+ attr := *tb.Shift()
+ if attr.TokenType != html.AttributeToken {
+ break
+ } else if attr.Text == nil {
+ continue // removed attribute
+ }
+
+ val := attr.AttrVal
+ if attr.Traits&trimAttr != 0 {
+ val = parse.ReplaceMultipleWhitespaceAndEntities(val, EntitiesMap, nil)
+ val = parse.TrimWhitespace(val)
+ } else {
+ val = parse.ReplaceEntities(val, EntitiesMap, nil)
+ }
+ if t.Traits != 0 {
+ if len(val) == 0 && (attr.Hash == Class ||
+ attr.Hash == Dir ||
+ attr.Hash == Id ||
+ attr.Hash == Name ||
+ attr.Hash == Action && t.Hash == Form) {
+ continue // omit empty attribute values
+ }
+ if attr.Traits&caselessAttr != 0 {
+ val = parse.ToLower(val)
+ }
+ if rawTagHash != 0 && attr.Hash == Type {
+ rawTagMediatype = parse.Copy(val)
+ }
+
+ if attr.Hash == Enctype || attr.Hash == Codetype || attr.Hash == Accept || attr.Hash == Type && (t.Hash == A || t.Hash == Link || t.Hash == Embed || t.Hash == Object || t.Hash == Source || t.Hash == Script || t.Hash == Style) {
+ val = minify.Mediatype(val)
+ }
+
+ // default attribute values can be omitted
+ if !o.KeepDefaultAttrVals && (attr.Hash == Type && (t.Hash == Script && jsMimetypes[string(val)] ||
+ t.Hash == Style && bytes.Equal(val, cssMimeBytes) ||
+ t.Hash == Link && bytes.Equal(val, cssMimeBytes) ||
+ t.Hash == Input && bytes.Equal(val, textBytes) ||
+ t.Hash == Button && bytes.Equal(val, submitBytes)) ||
+ attr.Hash == Language && t.Hash == Script ||
+ attr.Hash == Method && bytes.Equal(val, getBytes) ||
+ attr.Hash == Enctype && bytes.Equal(val, formMimeBytes) ||
+ attr.Hash == Colspan && bytes.Equal(val, oneBytes) ||
+ attr.Hash == Rowspan && bytes.Equal(val, oneBytes) ||
+ attr.Hash == Shape && bytes.Equal(val, rectBytes) ||
+ attr.Hash == Span && bytes.Equal(val, oneBytes) ||
+ attr.Hash == Clear && bytes.Equal(val, noneBytes) ||
+ attr.Hash == Frameborder && bytes.Equal(val, oneBytes) ||
+ attr.Hash == Scrolling && bytes.Equal(val, autoBytes) ||
+ attr.Hash == Valuetype && bytes.Equal(val, dataBytes) ||
+ attr.Hash == Media && t.Hash == Style && bytes.Equal(val, allBytes)) {
+ continue
+ }
+
+ if attr.Hash == Style {
+ // CSS minifier for attribute inline code
+ val = parse.TrimWhitespace(val)
+ attrMinifyBuffer.Reset()
+ if err := m.MinifyMimetype(cssMimeBytes, attrMinifyBuffer, buffer.NewReader(val), inlineParams); err == nil {
+ val = attrMinifyBuffer.Bytes()
+ } else if err != minify.ErrNotExist {
+ return minify.UpdateErrorPosition(err, z, attr.Offset)
+ }
+ if len(val) == 0 {
+ continue
+ }
+ } else if len(attr.Text) > 2 && attr.Text[0] == 'o' && attr.Text[1] == 'n' {
+ // JS minifier for attribute inline code
+ val = parse.TrimWhitespace(val)
+ if len(val) >= 11 && parse.EqualFold(val[:11], jsSchemeBytes) {
+ val = val[11:]
+ }
+ attrMinifyBuffer.Reset()
+ if err := m.MinifyMimetype(jsMimeBytes, attrMinifyBuffer, buffer.NewReader(val), nil); err == nil {
+ val = attrMinifyBuffer.Bytes()
+ } else if err != minify.ErrNotExist {
+ return minify.UpdateErrorPosition(err, z, attr.Offset)
+ }
+ if len(val) == 0 {
+ continue
+ }
+ } else if attr.Traits&urlAttr != 0 { // anchors are already handled
+ val = parse.TrimWhitespace(val)
+ if 5 < len(val) {
+ if parse.EqualFold(val[:4], httpBytes) {
+ if val[4] == ':' {
+ if m.URL != nil && m.URL.Scheme == "http" {
+ val = val[5:]
+ } else {
+ parse.ToLower(val[:4])
+ }
+ } else if (val[4] == 's' || val[4] == 'S') && val[5] == ':' {
+ if m.URL != nil && m.URL.Scheme == "https" {
+ val = val[6:]
+ } else {
+ parse.ToLower(val[:5])
+ }
+ }
+ } else if parse.EqualFold(val[:5], dataSchemeBytes) {
+ val = minify.DataURI(m, val)
+ }
+ }
+ }
+ }
+
+ w.Write(spaceBytes)
+ w.Write(attr.Text)
+ if len(val) > 0 && attr.Traits&booleanAttr == 0 {
+ w.Write(isBytes)
+
+ // use double quotes for RDFa attributes
+ isXML := attr.Hash == Vocab || attr.Hash == Typeof || attr.Hash == Property || attr.Hash == Resource || attr.Hash == Prefix || attr.Hash == Content || attr.Hash == About || attr.Hash == Rev || attr.Hash == Datatype || attr.Hash == Inlist
+
+ // no quotes if possible, else prefer single or double depending on which occurs more often in value
+ var quote byte
+
+ if 0 < len(attr.Data) && (attr.Data[len(attr.Data)-1] == '\'' || attr.Data[len(attr.Data)-1] == '"') {
+ quote = attr.Data[len(attr.Data)-1]
+ }
+ val = html.EscapeAttrVal(&attrByteBuffer, val, quote, o.KeepQuotes, isXML)
+ w.Write(val)
+ }
+ }
+ } else {
+ _ = tb.Shift() // StartTagClose
+ }
+ w.Write(gtBytes)
+
+ // skip text in select and optgroup tags
+ if t.Hash == Select || t.Hash == Optgroup {
+ if next := tb.Peek(0); next.TokenType == html.TextToken {
+ tb.Shift()
+ }
+ }
+
+ // keep space after phrasing tags (<i>, <span>, ...) FontAwesome etc.
+ if t.TokenType == html.StartTagToken && t.Traits&nonPhrasingTag == 0 {
+ if next := tb.Peek(0); next.Hash == t.Hash && next.TokenType == html.EndTagToken {
+ omitSpace = false
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/html/table.go b/vendor/github.com/tdewolff/minify/v2/html/table.go
new file mode 100644
index 0000000..22239fc
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/html/table.go
@@ -0,0 +1,1346 @@
+package html
+
+// traits is a bit set describing how the minifier must treat a particular
+// HTML tag or attribute; the tag-trait and attribute-trait constants below
+// define the individual bits.
+type traits uint16
+
+// Tag traits: per-element properties that drive whitespace collapsing,
+// raw-content handling, and <p> end-tag omission.
+const (
+ normalTag traits = 1 << iota // tag needs no special processing
+ rawTag // raw tags need special processing for their content
+ nonPhrasingTag // non-phrasing elements are unaffected by whitespace, remove spaces around these tags
+ objectTag // content tags with a few exclusions, keep spaces after these open/close tags
+ omitPTag // omit p end tag if it is followed by this start tag
+ keepPTag // keep p end tag if it is followed by this end tag
+)
+
+// Attribute traits: per-attribute properties that drive how attribute
+// values are normalized when tags are written out.
+const (
+ booleanAttr traits = 1 << iota // boolean attribute; its value is never written
+ caselessAttr // value is case-insensitive and may be lower-cased
+ urlAttr // value is a URL; scheme may be stripped or lower-cased, data: URIs minified
+ trimAttr // value may have whitespace trimmed and runs of whitespace collapsed
+)
+
+// tagMap assigns traits to every known HTML tag hash. The minifier consults
+// it to decide whether spaces around a tag can be removed (nonPhrasingTag),
+// must be kept (objectTag), whether the tag's content is raw (rawTag), and
+// whether a preceding <p> end tag may be omitted (omitPTag) or must be kept
+// (keepPTag).
+var tagMap = map[Hash]traits{
+ A: keepPTag,
+ Abbr: normalTag,
+ Address: nonPhrasingTag | omitPTag,
+ Area: normalTag,
+ Article: nonPhrasingTag | omitPTag,
+ Aside: nonPhrasingTag | omitPTag,
+ Audio: keepPTag,
+ B: normalTag,
+ Base: normalTag,
+ Bb: normalTag,
+ Bdi: normalTag,
+ Bdo: normalTag,
+ Blockquote: nonPhrasingTag | omitPTag,
+ Body: nonPhrasingTag,
+ Br: nonPhrasingTag,
+ Button: objectTag,
+ Canvas: objectTag | keepPTag,
+ Caption: nonPhrasingTag,
+ Cite: normalTag,
+ Code: normalTag,
+ Col: nonPhrasingTag,
+ Colgroup: nonPhrasingTag,
+ Data: normalTag,
+ Datalist: normalTag,
+ Dd: nonPhrasingTag,
+ Del: keepPTag,
+ Details: omitPTag,
+ Dfn: normalTag,
+ Dialog: normalTag,
+ Div: nonPhrasingTag | omitPTag,
+ Dl: nonPhrasingTag | omitPTag,
+ Dt: nonPhrasingTag,
+ Em: normalTag,
+ Embed: nonPhrasingTag,
+ Fieldset: nonPhrasingTag | omitPTag,
+ Figcaption: nonPhrasingTag | omitPTag,
+ Figure: nonPhrasingTag | omitPTag,
+ Footer: nonPhrasingTag | omitPTag,
+ Form: nonPhrasingTag | omitPTag,
+ H1: nonPhrasingTag | omitPTag,
+ H2: nonPhrasingTag | omitPTag,
+ H3: nonPhrasingTag | omitPTag,
+ H4: nonPhrasingTag | omitPTag,
+ H5: nonPhrasingTag | omitPTag,
+ H6: nonPhrasingTag | omitPTag,
+ Head: nonPhrasingTag,
+ Header: nonPhrasingTag | omitPTag,
+ Hgroup: nonPhrasingTag,
+ Hr: nonPhrasingTag | omitPTag,
+ Html: nonPhrasingTag,
+ I: normalTag,
+ Iframe: rawTag | objectTag,
+ Img: objectTag,
+ Input: objectTag,
+ Ins: keepPTag,
+ Kbd: normalTag,
+ Label: normalTag,
+ Legend: normalTag,
+ Li: nonPhrasingTag,
+ Link: normalTag,
+ Main: nonPhrasingTag | omitPTag,
+ Map: keepPTag,
+ Mark: normalTag,
+ Math: rawTag,
+ Menu: omitPTag,
+ Meta: nonPhrasingTag,
+ Meter: objectTag,
+ Nav: nonPhrasingTag | omitPTag,
+ Noscript: nonPhrasingTag | keepPTag,
+ Object: objectTag,
+ Ol: nonPhrasingTag | omitPTag,
+ Optgroup: normalTag,
+ Option: normalTag,
+ Output: nonPhrasingTag,
+ P: nonPhrasingTag | omitPTag,
+ Param: normalTag,
+ Picture: normalTag,
+ Pre: nonPhrasingTag | omitPTag,
+ Progress: objectTag,
+ Q: objectTag,
+ Rp: normalTag,
+ Rt: normalTag,
+ Ruby: normalTag,
+ S: normalTag,
+ Samp: normalTag,
+ Script: rawTag,
+ Section: nonPhrasingTag | omitPTag,
+ Select: objectTag,
+ Slot: normalTag,
+ Small: normalTag,
+ Source: normalTag,
+ Span: normalTag,
+ Strong: normalTag,
+ Style: rawTag | nonPhrasingTag,
+ Sub: normalTag,
+ Summary: normalTag,
+ Sup: normalTag,
+ Svg: rawTag | objectTag,
+ Table: nonPhrasingTag | omitPTag,
+ Tbody: nonPhrasingTag,
+ Td: nonPhrasingTag,
+ Template: normalTag,
+ Textarea: rawTag | objectTag,
+ Tfoot: nonPhrasingTag,
+ Th: nonPhrasingTag,
+ Thead: nonPhrasingTag,
+ Time: normalTag,
+ Title: nonPhrasingTag,
+ Tr: nonPhrasingTag,
+ Track: normalTag,
+ U: normalTag,
+ Ul: nonPhrasingTag | omitPTag,
+ Var: normalTag,
+ Video: objectTag | keepPTag,
+ Wbr: normalTag,
+}
+
+// attrMap assigns traits to every known HTML attribute hash. The attribute
+// write loop uses these bits to trim/collapse whitespace in values
+// (trimAttr), lower-case case-insensitive values (caselessAttr), minify URL
+// values (urlAttr), and drop the value of boolean attributes (booleanAttr).
+var attrMap = map[Hash]traits{
+ Accept: trimAttr,
+ Accept_Charset: caselessAttr,
+ Action: urlAttr,
+ Align: caselessAttr,
+ Alink: caselessAttr,
+ Allowfullscreen: booleanAttr,
+ Async: booleanAttr,
+ Autofocus: booleanAttr,
+ Autoplay: booleanAttr,
+ Axis: caselessAttr,
+ Background: urlAttr,
+ Bgcolor: caselessAttr,
+ Charset: caselessAttr,
+ Checked: booleanAttr,
+ Cite: urlAttr,
+ Class: trimAttr,
+ Classid: urlAttr,
+ Clear: caselessAttr,
+ Codebase: urlAttr,
+ Codetype: trimAttr,
+ Color: caselessAttr,
+ Cols: trimAttr,
+ Colspan: trimAttr,
+ Compact: booleanAttr,
+ Controls: booleanAttr,
+ Data: urlAttr,
+ Declare: booleanAttr,
+ Default: booleanAttr,
+ DefaultChecked: booleanAttr,
+ DefaultMuted: booleanAttr,
+ DefaultSelected: booleanAttr,
+ Defer: booleanAttr,
+ Dir: caselessAttr,
+ Disabled: booleanAttr,
+ Enabled: booleanAttr,
+ Enctype: trimAttr,
+ Face: caselessAttr,
+ Formaction: urlAttr,
+ Formnovalidate: booleanAttr,
+ Frame: caselessAttr,
+ Hidden: booleanAttr,
+ Href: urlAttr,
+ Hreflang: caselessAttr,
+ Http_Equiv: caselessAttr,
+ Icon: urlAttr,
+ Inert: booleanAttr,
+ Ismap: booleanAttr,
+ Itemscope: booleanAttr,
+ Lang: trimAttr,
+ Language: caselessAttr,
+ Link: caselessAttr,
+ Longdesc: urlAttr,
+ Manifest: urlAttr,
+ Maxlength: trimAttr,
+ Media: caselessAttr | trimAttr,
+ Method: caselessAttr,
+ Multiple: booleanAttr,
+ Muted: booleanAttr,
+ Nohref: booleanAttr,
+ Noresize: booleanAttr,
+ Noshade: booleanAttr,
+ Novalidate: booleanAttr,
+ Nowrap: booleanAttr,
+ Open: booleanAttr,
+ Pauseonexit: booleanAttr,
+ Poster: urlAttr,
+ Profile: urlAttr,
+ Readonly: booleanAttr,
+ Rel: caselessAttr | trimAttr,
+ Required: booleanAttr,
+ Rev: caselessAttr,
+ Reversed: booleanAttr,
+ Rows: trimAttr,
+ Rowspan: trimAttr,
+ Rules: caselessAttr,
+ Scope: caselessAttr,
+ Scoped: booleanAttr,
+ Scrolling: caselessAttr,
+ Seamless: booleanAttr,
+ Selected: booleanAttr,
+ Shape: caselessAttr,
+ Size: trimAttr,
+ Sortable: booleanAttr,
+ Span: trimAttr,
+ Src: urlAttr,
+ Srcset: trimAttr,
+ Tabindex: trimAttr,
+ Target: caselessAttr,
+ Text: caselessAttr,
+ Translate: caselessAttr,
+ Truespeed: booleanAttr,
+ Type: trimAttr,
+ Typemustmatch: booleanAttr,
+ Undeterminate: booleanAttr,
+ Usemap: urlAttr,
+ Valign: caselessAttr,
+ Valuetype: caselessAttr,
+ Vlink: caselessAttr,
+ Visible: booleanAttr,
+ Xmlns: urlAttr,
+}
+
+// jsMimetypes lists the script-type mimetype values that are the default
+// for <script>; a type attribute with one of these values may be omitted
+// unless KeepDefaultAttrVals is set.
+var jsMimetypes = map[string]bool{
+ "text/javascript": true,
+ "application/javascript": true,
+}
+
+// EntitiesMap are all named character entities.
+var EntitiesMap = map[string][]byte{
+ "AElig": []byte("&#198;"),
+ "AMP": []byte("&"),
+ "Aacute": []byte("&#193;"),
+ "Abreve": []byte("&#258;"),
+ "Acirc": []byte("&#194;"),
+ "Agrave": []byte("&#192;"),
+ "Alpha": []byte("&#913;"),
+ "Amacr": []byte("&#256;"),
+ "Aogon": []byte("&#260;"),
+ "ApplyFunction": []byte("&af;"),
+ "Aring": []byte("&#197;"),
+ "Assign": []byte("&#8788;"),
+ "Atilde": []byte("&#195;"),
+ "Backslash": []byte("&#8726;"),
+ "Barwed": []byte("&#8966;"),
+ "Because": []byte("&#8757;"),
+ "Bernoullis": []byte("&Bscr;"),
+ "Breve": []byte("&#728;"),
+ "Bumpeq": []byte("&bump;"),
+ "Cacute": []byte("&#262;"),
+ "CapitalDifferentialD": []byte("&DD;"),
+ "Cayleys": []byte("&Cfr;"),
+ "Ccaron": []byte("&#268;"),
+ "Ccedil": []byte("&#199;"),
+ "Ccirc": []byte("&#264;"),
+ "Cconint": []byte("&#8752;"),
+ "Cedilla": []byte("&#184;"),
+ "CenterDot": []byte("&#183;"),
+ "CircleDot": []byte("&odot;"),
+ "CircleMinus": []byte("&#8854;"),
+ "CirclePlus": []byte("&#8853;"),
+ "CircleTimes": []byte("&#8855;"),
+ "ClockwiseContourIntegral": []byte("&#8754;"),
+ "CloseCurlyDoubleQuote": []byte("&#8221;"),
+ "CloseCurlyQuote": []byte("&#8217;"),
+ "Congruent": []byte("&#8801;"),
+ "Conint": []byte("&#8751;"),
+ "ContourIntegral": []byte("&oint;"),
+ "Coproduct": []byte("&#8720;"),
+ "CounterClockwiseContourIntegral": []byte("&#8755;"),
+ "CupCap": []byte("&#8781;"),
+ "DDotrahd": []byte("&#10513;"),
+ "Dagger": []byte("&#8225;"),
+ "Dcaron": []byte("&#270;"),
+ "Delta": []byte("&#916;"),
+ "DiacriticalAcute": []byte("&#180;"),
+ "DiacriticalDot": []byte("&dot;"),
+ "DiacriticalDoubleAcute": []byte("&#733;"),
+ "DiacriticalGrave": []byte("`"),
+ "DiacriticalTilde": []byte("&#732;"),
+ "Diamond": []byte("&diam;"),
+ "DifferentialD": []byte("&dd;"),
+ "DotDot": []byte("&#8412;"),
+ "DotEqual": []byte("&#8784;"),
+ "DoubleContourIntegral": []byte("&#8751;"),
+ "DoubleDot": []byte("&Dot;"),
+ "DoubleDownArrow": []byte("&dArr;"),
+ "DoubleLeftArrow": []byte("&lArr;"),
+ "DoubleLeftRightArrow": []byte("&iff;"),
+ "DoubleLeftTee": []byte("&Dashv;"),
+ "DoubleLongLeftArrow": []byte("&xlArr;"),
+ "DoubleLongLeftRightArrow": []byte("&xhArr;"),
+ "DoubleLongRightArrow": []byte("&xrArr;"),
+ "DoubleRightArrow": []byte("&rArr;"),
+ "DoubleRightTee": []byte("&#8872;"),
+ "DoubleUpArrow": []byte("&uArr;"),
+ "DoubleUpDownArrow": []byte("&vArr;"),
+ "DoubleVerticalBar": []byte("&par;"),
+ "DownArrow": []byte("&darr;"),
+ "DownArrowBar": []byte("&#10515;"),
+ "DownArrowUpArrow": []byte("&#8693;"),
+ "DownBreve": []byte("&#785;"),
+ "DownLeftRightVector": []byte("&#10576;"),
+ "DownLeftTeeVector": []byte("&#10590;"),
+ "DownLeftVector": []byte("&#8637;"),
+ "DownLeftVectorBar": []byte("&#10582;"),
+ "DownRightTeeVector": []byte("&#10591;"),
+ "DownRightVector": []byte("&#8641;"),
+ "DownRightVectorBar": []byte("&#10583;"),
+ "DownTee": []byte("&top;"),
+ "DownTeeArrow": []byte("&#8615;"),
+ "Downarrow": []byte("&dArr;"),
+ "Dstrok": []byte("&#272;"),
+ "Eacute": []byte("&#201;"),
+ "Ecaron": []byte("&#282;"),
+ "Ecirc": []byte("&#202;"),
+ "Egrave": []byte("&#200;"),
+ "Element": []byte("&in;"),
+ "Emacr": []byte("&#274;"),
+ "EmptySmallSquare": []byte("&#9723;"),
+ "EmptyVerySmallSquare": []byte("&#9643;"),
+ "Eogon": []byte("&#280;"),
+ "Epsilon": []byte("&#917;"),
+ "EqualTilde": []byte("&esim;"),
+ "Equilibrium": []byte("&#8652;"),
+ "Exists": []byte("&#8707;"),
+ "ExponentialE": []byte("&ee;"),
+ "FilledSmallSquare": []byte("&#9724;"),
+ "FilledVerySmallSquare": []byte("&squf;"),
+ "ForAll": []byte("&#8704;"),
+ "Fouriertrf": []byte("&Fscr;"),
+ "GT": []byte(">"),
+ "Gamma": []byte("&#915;"),
+ "Gammad": []byte("&#988;"),
+ "Gbreve": []byte("&#286;"),
+ "Gcedil": []byte("&#290;"),
+ "Gcirc": []byte("&#284;"),
+ "GreaterEqual": []byte("&ge;"),
+ "GreaterEqualLess": []byte("&gel;"),
+ "GreaterFullEqual": []byte("&gE;"),
+ "GreaterGreater": []byte("&#10914;"),
+ "GreaterLess": []byte("&gl;"),
+ "GreaterSlantEqual": []byte("&ges;"),
+ "GreaterTilde": []byte("&gsim;"),
+ "HARDcy": []byte("&#1066;"),
+ "Hacek": []byte("&#711;"),
+ "Hat": []byte("^"),
+ "Hcirc": []byte("&#292;"),
+ "HilbertSpace": []byte("&Hscr;"),
+ "HorizontalLine": []byte("&boxh;"),
+ "Hstrok": []byte("&#294;"),
+ "HumpDownHump": []byte("&bump;"),
+ "HumpEqual": []byte("&#8783;"),
+ "IJlig": []byte("&#306;"),
+ "Iacute": []byte("&#205;"),
+ "Icirc": []byte("&#206;"),
+ "Ifr": []byte("&Im;"),
+ "Igrave": []byte("&#204;"),
+ "Imacr": []byte("&#298;"),
+ "ImaginaryI": []byte("&ii;"),
+ "Implies": []byte("&rArr;"),
+ "Integral": []byte("&int;"),
+ "Intersection": []byte("&xcap;"),
+ "InvisibleComma": []byte("&ic;"),
+ "InvisibleTimes": []byte("&it;"),
+ "Iogon": []byte("&#302;"),
+ "Itilde": []byte("&#296;"),
+ "Jcirc": []byte("&#308;"),
+ "Jsercy": []byte("&#1032;"),
+ "Kappa": []byte("&#922;"),
+ "Kcedil": []byte("&#310;"),
+ "LT": []byte("<"),
+ "Lacute": []byte("&#313;"),
+ "Lambda": []byte("&#923;"),
+ "Laplacetrf": []byte("&Lscr;"),
+ "Lcaron": []byte("&#317;"),
+ "Lcedil": []byte("&#315;"),
+ "LeftAngleBracket": []byte("&lang;"),
+ "LeftArrow": []byte("&larr;"),
+ "LeftArrowBar": []byte("&#8676;"),
+ "LeftArrowRightArrow": []byte("&#8646;"),
+ "LeftCeiling": []byte("&#8968;"),
+ "LeftDoubleBracket": []byte("&lobrk;"),
+ "LeftDownTeeVector": []byte("&#10593;"),
+ "LeftDownVector": []byte("&#8643;"),
+ "LeftDownVectorBar": []byte("&#10585;"),
+ "LeftFloor": []byte("&#8970;"),
+ "LeftRightArrow": []byte("&harr;"),
+ "LeftRightVector": []byte("&#10574;"),
+ "LeftTee": []byte("&#8867;"),
+ "LeftTeeArrow": []byte("&#8612;"),
+ "LeftTeeVector": []byte("&#10586;"),
+ "LeftTriangle": []byte("&#8882;"),
+ "LeftTriangleBar": []byte("&#10703;"),
+ "LeftTriangleEqual": []byte("&#8884;"),
+ "LeftUpDownVector": []byte("&#10577;"),
+ "LeftUpTeeVector": []byte("&#10592;"),
+ "LeftUpVector": []byte("&#8639;"),
+ "LeftUpVectorBar": []byte("&#10584;"),
+ "LeftVector": []byte("&#8636;"),
+ "LeftVectorBar": []byte("&#10578;"),
+ "Leftarrow": []byte("&lArr;"),
+ "Leftrightarrow": []byte("&iff;"),
+ "LessEqualGreater": []byte("&leg;"),
+ "LessFullEqual": []byte("&lE;"),
+ "LessGreater": []byte("&lg;"),
+ "LessLess": []byte("&#10913;"),
+ "LessSlantEqual": []byte("&les;"),
+ "LessTilde": []byte("&lsim;"),
+ "Lleftarrow": []byte("&#8666;"),
+ "Lmidot": []byte("&#319;"),
+ "LongLeftArrow": []byte("&xlarr;"),
+ "LongLeftRightArrow": []byte("&xharr;"),
+ "LongRightArrow": []byte("&xrarr;"),
+ "Longleftarrow": []byte("&xlArr;"),
+ "Longleftrightarrow": []byte("&xhArr;"),
+ "Longrightarrow": []byte("&xrArr;"),
+ "LowerLeftArrow": []byte("&#8601;"),
+ "LowerRightArrow": []byte("&#8600;"),
+ "Lstrok": []byte("&#321;"),
+ "MediumSpace": []byte("&#8287;"),
+ "Mellintrf": []byte("&Mscr;"),
+ "MinusPlus": []byte("&mp;"),
+ "Nacute": []byte("&#323;"),
+ "Ncaron": []byte("&#327;"),
+ "Ncedil": []byte("&#325;"),
+ "NegativeMediumSpace": []byte("&#8203;"),
+ "NegativeThickSpace": []byte("&#8203;"),
+ "NegativeThinSpace": []byte("&#8203;"),
+ "NegativeVeryThinSpace": []byte("&#8203;"),
+ "NestedGreaterGreater": []byte("&Gt;"),
+ "NestedLessLess": []byte("&Lt;"),
+ "NewLine": []byte("\n"),
+ "NoBreak": []byte("&#8288;"),
+ "NonBreakingSpace": []byte("&#160;"),
+ "NotCongruent": []byte("&#8802;"),
+ "NotCupCap": []byte("&#8813;"),
+ "NotDoubleVerticalBar": []byte("&npar;"),
+ "NotElement": []byte("&#8713;"),
+ "NotEqual": []byte("&ne;"),
+ "NotExists": []byte("&#8708;"),
+ "NotGreater": []byte("&ngt;"),
+ "NotGreaterEqual": []byte("&nge;"),
+ "NotGreaterLess": []byte("&ntgl;"),
+ "NotGreaterTilde": []byte("&#8821;"),
+ "NotLeftTriangle": []byte("&#8938;"),
+ "NotLeftTriangleEqual": []byte("&#8940;"),
+ "NotLess": []byte("&nlt;"),
+ "NotLessEqual": []byte("&nle;"),
+ "NotLessGreater": []byte("&ntlg;"),
+ "NotLessTilde": []byte("&#8820;"),
+ "NotPrecedes": []byte("&npr;"),
+ "NotPrecedesSlantEqual": []byte("&#8928;"),
+ "NotReverseElement": []byte("&#8716;"),
+ "NotRightTriangle": []byte("&#8939;"),
+ "NotRightTriangleEqual": []byte("&#8941;"),
+ "NotSquareSubsetEqual": []byte("&#8930;"),
+ "NotSquareSupersetEqual": []byte("&#8931;"),
+ "NotSubsetEqual": []byte("&#8840;"),
+ "NotSucceeds": []byte("&nsc;"),
+ "NotSucceedsSlantEqual": []byte("&#8929;"),
+ "NotSupersetEqual": []byte("&#8841;"),
+ "NotTilde": []byte("&nsim;"),
+ "NotTildeEqual": []byte("&#8772;"),
+ "NotTildeFullEqual": []byte("&#8775;"),
+ "NotTildeTilde": []byte("&nap;"),
+ "NotVerticalBar": []byte("&nmid;"),
+ "Ntilde": []byte("&#209;"),
+ "OElig": []byte("&#338;"),
+ "Oacute": []byte("&#211;"),
+ "Ocirc": []byte("&#212;"),
+ "Odblac": []byte("&#336;"),
+ "Ograve": []byte("&#210;"),
+ "Omacr": []byte("&#332;"),
+ "Omega": []byte("&ohm;"),
+ "Omicron": []byte("&#927;"),
+ "OpenCurlyDoubleQuote": []byte("&#8220;"),
+ "OpenCurlyQuote": []byte("&#8216;"),
+ "Oslash": []byte("&#216;"),
+ "Otilde": []byte("&#213;"),
+ "OverBar": []byte("&#8254;"),
+ "OverBrace": []byte("&#9182;"),
+ "OverBracket": []byte("&tbrk;"),
+ "OverParenthesis": []byte("&#9180;"),
+ "PartialD": []byte("&part;"),
+ "PlusMinus": []byte("&pm;"),
+ "Poincareplane": []byte("&Hfr;"),
+ "Precedes": []byte("&pr;"),
+ "PrecedesEqual": []byte("&pre;"),
+ "PrecedesSlantEqual": []byte("&#8828;"),
+ "PrecedesTilde": []byte("&#8830;"),
+ "Product": []byte("&prod;"),
+ "Proportion": []byte("&#8759;"),
+ "Proportional": []byte("&prop;"),
+ "QUOT": []byte("\""),
+ "Racute": []byte("&#340;"),
+ "Rcaron": []byte("&#344;"),
+ "Rcedil": []byte("&#342;"),
+ "ReverseElement": []byte("&ni;"),
+ "ReverseEquilibrium": []byte("&#8651;"),
+ "ReverseUpEquilibrium": []byte("&duhar;"),
+ "Rfr": []byte("&Re;"),
+ "RightAngleBracket": []byte("&rang;"),
+ "RightArrow": []byte("&rarr;"),
+ "RightArrowBar": []byte("&#8677;"),
+ "RightArrowLeftArrow": []byte("&#8644;"),
+ "RightCeiling": []byte("&#8969;"),
+ "RightDoubleBracket": []byte("&robrk;"),
+ "RightDownTeeVector": []byte("&#10589;"),
+ "RightDownVector": []byte("&#8642;"),
+ "RightDownVectorBar": []byte("&#10581;"),
+ "RightFloor": []byte("&#8971;"),
+ "RightTee": []byte("&#8866;"),
+ "RightTeeArrow": []byte("&map;"),
+ "RightTeeVector": []byte("&#10587;"),
+ "RightTriangle": []byte("&#8883;"),
+ "RightTriangleBar": []byte("&#10704;"),
+ "RightTriangleEqual": []byte("&#8885;"),
+ "RightUpDownVector": []byte("&#10575;"),
+ "RightUpTeeVector": []byte("&#10588;"),
+ "RightUpVector": []byte("&#8638;"),
+ "RightUpVectorBar": []byte("&#10580;"),
+ "RightVector": []byte("&#8640;"),
+ "RightVectorBar": []byte("&#10579;"),
+ "Rightarrow": []byte("&rArr;"),
+ "RoundImplies": []byte("&#10608;"),
+ "Rrightarrow": []byte("&#8667;"),
+ "RuleDelayed": []byte("&#10740;"),
+ "SHCHcy": []byte("&#1065;"),
+ "SOFTcy": []byte("&#1068;"),
+ "Sacute": []byte("&#346;"),
+ "Scaron": []byte("&#352;"),
+ "Scedil": []byte("&#350;"),
+ "Scirc": []byte("&#348;"),
+ "ShortDownArrow": []byte("&darr;"),
+ "ShortLeftArrow": []byte("&larr;"),
+ "ShortRightArrow": []byte("&rarr;"),
+ "ShortUpArrow": []byte("&uarr;"),
+ "Sigma": []byte("&#931;"),
+ "SmallCircle": []byte("&#8728;"),
+ "Square": []byte("&squ;"),
+ "SquareIntersection": []byte("&#8851;"),
+ "SquareSubset": []byte("&#8847;"),
+ "SquareSubsetEqual": []byte("&#8849;"),
+ "SquareSuperset": []byte("&#8848;"),
+ "SquareSupersetEqual": []byte("&#8850;"),
+ "SquareUnion": []byte("&#8852;"),
+ "Subset": []byte("&Sub;"),
+ "SubsetEqual": []byte("&sube;"),
+ "Succeeds": []byte("&sc;"),
+ "SucceedsEqual": []byte("&sce;"),
+ "SucceedsSlantEqual": []byte("&#8829;"),
+ "SucceedsTilde": []byte("&#8831;"),
+ "SuchThat": []byte("&ni;"),
+ "Superset": []byte("&sup;"),
+ "SupersetEqual": []byte("&supe;"),
+ "Supset": []byte("&Sup;"),
+ "THORN": []byte("&#222;"),
+ "Tab": []byte(" "),
+ "Tcaron": []byte("&#356;"),
+ "Tcedil": []byte("&#354;"),
+ "Therefore": []byte("&#8756;"),
+ "Theta": []byte("&#920;"),
+ "ThinSpace": []byte("&#8201;"),
+ "Tilde": []byte("&sim;"),
+ "TildeEqual": []byte("&sime;"),
+ "TildeFullEqual": []byte("&cong;"),
+ "TildeTilde": []byte("&ap;"),
+ "TripleDot": []byte("&tdot;"),
+ "Tstrok": []byte("&#358;"),
+ "Uacute": []byte("&#218;"),
+ "Uarrocir": []byte("&#10569;"),
+ "Ubreve": []byte("&#364;"),
+ "Ucirc": []byte("&#219;"),
+ "Udblac": []byte("&#368;"),
+ "Ugrave": []byte("&#217;"),
+ "Umacr": []byte("&#362;"),
+ "UnderBar": []byte("_"),
+ "UnderBrace": []byte("&#9183;"),
+ "UnderBracket": []byte("&bbrk;"),
+ "UnderParenthesis": []byte("&#9181;"),
+ "Union": []byte("&xcup;"),
+ "UnionPlus": []byte("&#8846;"),
+ "Uogon": []byte("&#370;"),
+ "UpArrow": []byte("&uarr;"),
+ "UpArrowBar": []byte("&#10514;"),
+ "UpArrowDownArrow": []byte("&#8645;"),
+ "UpDownArrow": []byte("&varr;"),
+ "UpEquilibrium": []byte("&udhar;"),
+ "UpTee": []byte("&bot;"),
+ "UpTeeArrow": []byte("&#8613;"),
+ "Uparrow": []byte("&uArr;"),
+ "Updownarrow": []byte("&vArr;"),
+ "UpperLeftArrow": []byte("&#8598;"),
+ "UpperRightArrow": []byte("&#8599;"),
+ "Upsilon": []byte("&#933;"),
+ "Uring": []byte("&#366;"),
+ "Utilde": []byte("&#360;"),
+ "Verbar": []byte("&Vert;"),
+ "VerticalBar": []byte("&mid;"),
+ "VerticalLine": []byte("|"),
+ "VerticalSeparator": []byte("&#10072;"),
+ "VerticalTilde": []byte("&wr;"),
+ "VeryThinSpace": []byte("&#8202;"),
+ "Vvdash": []byte("&#8874;"),
+ "Wcirc": []byte("&#372;"),
+ "Yacute": []byte("&#221;"),
+ "Ycirc": []byte("&#374;"),
+ "Zacute": []byte("&#377;"),
+ "Zcaron": []byte("&#381;"),
+ "ZeroWidthSpace": []byte("&#8203;"),
+ "aacute": []byte("&#225;"),
+ "abreve": []byte("&#259;"),
+ "acirc": []byte("&#226;"),
+ "acute": []byte("&#180;"),
+ "aelig": []byte("&#230;"),
+ "agrave": []byte("&#224;"),
+ "alefsym": []byte("&#8501;"),
+ "alpha": []byte("&#945;"),
+ "amacr": []byte("&#257;"),
+ "amp": []byte("&"),
+ "andslope": []byte("&#10840;"),
+ "angle": []byte("&ang;"),
+ "angmsd": []byte("&#8737;"),
+ "angmsdaa": []byte("&#10664;"),
+ "angmsdab": []byte("&#10665;"),
+ "angmsdac": []byte("&#10666;"),
+ "angmsdad": []byte("&#10667;"),
+ "angmsdae": []byte("&#10668;"),
+ "angmsdaf": []byte("&#10669;"),
+ "angmsdag": []byte("&#10670;"),
+ "angmsdah": []byte("&#10671;"),
+ "angrtvb": []byte("&#8894;"),
+ "angrtvbd": []byte("&#10653;"),
+ "angsph": []byte("&#8738;"),
+ "angst": []byte("&#197;"),
+ "angzarr": []byte("&#9084;"),
+ "aogon": []byte("&#261;"),
+ "apos": []byte("'"),
+ "approx": []byte("&ap;"),
+ "approxeq": []byte("&ape;"),
+ "aring": []byte("&#229;"),
+ "ast": []byte("*"),
+ "asymp": []byte("&ap;"),
+ "asympeq": []byte("&#8781;"),
+ "atilde": []byte("&#227;"),
+ "awconint": []byte("&#8755;"),
+ "backcong": []byte("&#8780;"),
+ "backepsilon": []byte("&#1014;"),
+ "backprime": []byte("&#8245;"),
+ "backsim": []byte("&bsim;"),
+ "backsimeq": []byte("&#8909;"),
+ "barvee": []byte("&#8893;"),
+ "barwed": []byte("&#8965;"),
+ "barwedge": []byte("&#8965;"),
+ "bbrktbrk": []byte("&#9142;"),
+ "becaus": []byte("&#8757;"),
+ "because": []byte("&#8757;"),
+ "bemptyv": []byte("&#10672;"),
+ "bernou": []byte("&Bscr;"),
+ "between": []byte("&#8812;"),
+ "bigcap": []byte("&xcap;"),
+ "bigcirc": []byte("&#9711;"),
+ "bigcup": []byte("&xcup;"),
+ "bigodot": []byte("&xodot;"),
+ "bigoplus": []byte("&#10753;"),
+ "bigotimes": []byte("&#10754;"),
+ "bigsqcup": []byte("&#10758;"),
+ "bigstar": []byte("&#9733;"),
+ "bigtriangledown": []byte("&#9661;"),
+ "bigtriangleup": []byte("&#9651;"),
+ "biguplus": []byte("&#10756;"),
+ "bigvee": []byte("&Vee;"),
+ "bigwedge": []byte("&#8896;"),
+ "bkarow": []byte("&rbarr;"),
+ "blacklozenge": []byte("&lozf;"),
+ "blacksquare": []byte("&squf;"),
+ "blacktriangle": []byte("&#9652;"),
+ "blacktriangledown": []byte("&#9662;"),
+ "blacktriangleleft": []byte("&#9666;"),
+ "blacktriangleright": []byte("&#9656;"),
+ "bottom": []byte("&bot;"),
+ "bowtie": []byte("&#8904;"),
+ "boxminus": []byte("&#8863;"),
+ "boxplus": []byte("&#8862;"),
+ "boxtimes": []byte("&#8864;"),
+ "bprime": []byte("&#8245;"),
+ "breve": []byte("&#728;"),
+ "brvbar": []byte("&#166;"),
+ "bsol": []byte("\\"),
+ "bsolhsub": []byte("&#10184;"),
+ "bullet": []byte("&bull;"),
+ "bumpeq": []byte("&#8783;"),
+ "cacute": []byte("&#263;"),
+ "capbrcup": []byte("&#10825;"),
+ "caron": []byte("&#711;"),
+ "ccaron": []byte("&#269;"),
+ "ccedil": []byte("&#231;"),
+ "ccirc": []byte("&#265;"),
+ "ccupssm": []byte("&#10832;"),
+ "cedil": []byte("&#184;"),
+ "cemptyv": []byte("&#10674;"),
+ "centerdot": []byte("&#183;"),
+ "checkmark": []byte("&check;"),
+ "circeq": []byte("&cire;"),
+ "circlearrowleft": []byte("&#8634;"),
+ "circlearrowright": []byte("&#8635;"),
+ "circledR": []byte("&REG;"),
+ "circledS": []byte("&oS;"),
+ "circledast": []byte("&oast;"),
+ "circledcirc": []byte("&ocir;"),
+ "circleddash": []byte("&#8861;"),
+ "cirfnint": []byte("&#10768;"),
+ "cirscir": []byte("&#10690;"),
+ "clubsuit": []byte("&#9827;"),
+ "colon": []byte(":"),
+ "colone": []byte("&#8788;"),
+ "coloneq": []byte("&#8788;"),
+ "comma": []byte(","),
+ "commat": []byte("@"),
+ "compfn": []byte("&#8728;"),
+ "complement": []byte("&comp;"),
+ "complexes": []byte("&Copf;"),
+ "congdot": []byte("&#10861;"),
+ "conint": []byte("&oint;"),
+ "coprod": []byte("&#8720;"),
+ "copysr": []byte("&#8471;"),
+ "cudarrl": []byte("&#10552;"),
+ "cudarrr": []byte("&#10549;"),
+ "cularr": []byte("&#8630;"),
+ "cularrp": []byte("&#10557;"),
+ "cupbrcap": []byte("&#10824;"),
+ "cupdot": []byte("&#8845;"),
+ "curarr": []byte("&#8631;"),
+ "curarrm": []byte("&#10556;"),
+ "curlyeqprec": []byte("&#8926;"),
+ "curlyeqsucc": []byte("&#8927;"),
+ "curlyvee": []byte("&#8910;"),
+ "curlywedge": []byte("&#8911;"),
+ "curren": []byte("&#164;"),
+ "curvearrowleft": []byte("&#8630;"),
+ "curvearrowright": []byte("&#8631;"),
+ "cwconint": []byte("&#8754;"),
+ "cylcty": []byte("&#9005;"),
+ "dagger": []byte("&#8224;"),
+ "daleth": []byte("&#8504;"),
+ "dbkarow": []byte("&rBarr;"),
+ "dblac": []byte("&#733;"),
+ "dcaron": []byte("&#271;"),
+ "ddagger": []byte("&#8225;"),
+ "ddotseq": []byte("&eDDot;"),
+ "delta": []byte("&#948;"),
+ "demptyv": []byte("&#10673;"),
+ "diamond": []byte("&diam;"),
+ "diamondsuit": []byte("&#9830;"),
+ "digamma": []byte("&#989;"),
+ "divide": []byte("&div;"),
+ "divideontimes": []byte("&#8903;"),
+ "divonx": []byte("&#8903;"),
+ "dlcorn": []byte("&#8990;"),
+ "dlcrop": []byte("&#8973;"),
+ "dollar": []byte("$"),
+ "doteqdot": []byte("&eDot;"),
+ "dotminus": []byte("&#8760;"),
+ "dotplus": []byte("&#8724;"),
+ "dotsquare": []byte("&#8865;"),
+ "doublebarwedge": []byte("&#8966;"),
+ "downarrow": []byte("&darr;"),
+ "downdownarrows": []byte("&#8650;"),
+ "downharpoonleft": []byte("&#8643;"),
+ "downharpoonright": []byte("&#8642;"),
+ "drbkarow": []byte("&RBarr;"),
+ "drcorn": []byte("&#8991;"),
+ "drcrop": []byte("&#8972;"),
+ "dstrok": []byte("&#273;"),
+ "dwangle": []byte("&#10662;"),
+ "dzigrarr": []byte("&#10239;"),
+ "eacute": []byte("&#233;"),
+ "ecaron": []byte("&#283;"),
+ "ecirc": []byte("&#234;"),
+ "ecolon": []byte("&#8789;"),
+ "egrave": []byte("&#232;"),
+ "elinters": []byte("&#9191;"),
+ "emacr": []byte("&#275;"),
+ "emptyset": []byte("&#8709;"),
+ "emptyv": []byte("&#8709;"),
+ "emsp13": []byte("&#8196;"),
+ "emsp14": []byte("&#8197;"),
+ "eogon": []byte("&#281;"),
+ "epsilon": []byte("&#949;"),
+ "eqcirc": []byte("&ecir;"),
+ "eqcolon": []byte("&#8789;"),
+ "eqsim": []byte("&esim;"),
+ "eqslantgtr": []byte("&egs;"),
+ "eqslantless": []byte("&els;"),
+ "equals": []byte("="),
+ "equest": []byte("&#8799;"),
+ "equivDD": []byte("&#10872;"),
+ "eqvparsl": []byte("&#10725;"),
+ "excl": []byte("!"),
+ "expectation": []byte("&Escr;"),
+ "exponentiale": []byte("&ee;"),
+ "fallingdotseq": []byte("&#8786;"),
+ "female": []byte("&#9792;"),
+ "forall": []byte("&#8704;"),
+ "fpartint": []byte("&#10765;"),
+ "frac12": []byte("&#189;"),
+ "frac13": []byte("&#8531;"),
+ "frac14": []byte("&#188;"),
+ "frac15": []byte("&#8533;"),
+ "frac16": []byte("&#8537;"),
+ "frac18": []byte("&#8539;"),
+ "frac23": []byte("&#8532;"),
+ "frac25": []byte("&#8534;"),
+ "frac34": []byte("&#190;"),
+ "frac35": []byte("&#8535;"),
+ "frac38": []byte("&#8540;"),
+ "frac45": []byte("&#8536;"),
+ "frac56": []byte("&#8538;"),
+ "frac58": []byte("&#8541;"),
+ "frac78": []byte("&#8542;"),
+ "gacute": []byte("&#501;"),
+ "gamma": []byte("&#947;"),
+ "gammad": []byte("&#989;"),
+ "gbreve": []byte("&#287;"),
+ "gcirc": []byte("&#285;"),
+ "geq": []byte("&ge;"),
+ "geqq": []byte("&gE;"),
+ "geqslant": []byte("&ges;"),
+ "gesdoto": []byte("&#10882;"),
+ "gesdotol": []byte("&#10884;"),
+ "ggg": []byte("&Gg;"),
+ "gnapprox": []byte("&gnap;"),
+ "gneq": []byte("&gne;"),
+ "gneqq": []byte("&gnE;"),
+ "grave": []byte("`"),
+ "gt": []byte(">"),
+ "gtquest": []byte("&#10876;"),
+ "gtrapprox": []byte("&gap;"),
+ "gtrdot": []byte("&#8919;"),
+ "gtreqless": []byte("&gel;"),
+ "gtreqqless": []byte("&gEl;"),
+ "gtrless": []byte("&gl;"),
+ "gtrsim": []byte("&gsim;"),
+ "hArr": []byte("&iff;"),
+ "hairsp": []byte("&#8202;"),
+ "hamilt": []byte("&Hscr;"),
+ "hardcy": []byte("&#1098;"),
+ "harrcir": []byte("&#10568;"),
+ "hcirc": []byte("&#293;"),
+ "hearts": []byte("&#9829;"),
+ "heartsuit": []byte("&#9829;"),
+ "hellip": []byte("&mldr;"),
+ "hercon": []byte("&#8889;"),
+ "hksearow": []byte("&#10533;"),
+ "hkswarow": []byte("&#10534;"),
+ "homtht": []byte("&#8763;"),
+ "hookleftarrow": []byte("&#8617;"),
+ "hookrightarrow": []byte("&#8618;"),
+ "horbar": []byte("&#8213;"),
+ "hslash": []byte("&hbar;"),
+ "hstrok": []byte("&#295;"),
+ "hybull": []byte("&#8259;"),
+ "hyphen": []byte("&dash;"),
+ "iacute": []byte("&#237;"),
+ "icirc": []byte("&#238;"),
+ "iexcl": []byte("&#161;"),
+ "igrave": []byte("&#236;"),
+ "iiiint": []byte("&qint;"),
+ "iiint": []byte("&tint;"),
+ "ijlig": []byte("&#307;"),
+ "imacr": []byte("&#299;"),
+ "image": []byte("&Im;"),
+ "imagline": []byte("&Iscr;"),
+ "imagpart": []byte("&Im;"),
+ "imath": []byte("&#305;"),
+ "imped": []byte("&#437;"),
+ "incare": []byte("&#8453;"),
+ "infintie": []byte("&#10717;"),
+ "inodot": []byte("&#305;"),
+ "intcal": []byte("&#8890;"),
+ "integers": []byte("&Zopf;"),
+ "intercal": []byte("&#8890;"),
+ "intlarhk": []byte("&#10775;"),
+ "intprod": []byte("&iprod;"),
+ "iogon": []byte("&#303;"),
+ "iquest": []byte("&#191;"),
+ "isin": []byte("&in;"),
+ "isindot": []byte("&#8949;"),
+ "isinsv": []byte("&#8947;"),
+ "isinv": []byte("&in;"),
+ "itilde": []byte("&#297;"),
+ "jcirc": []byte("&#309;"),
+ "jmath": []byte("&#567;"),
+ "jsercy": []byte("&#1112;"),
+ "kappa": []byte("&#954;"),
+ "kappav": []byte("&#1008;"),
+ "kcedil": []byte("&#311;"),
+ "kgreen": []byte("&#312;"),
+ "lacute": []byte("&#314;"),
+ "laemptyv": []byte("&#10676;"),
+ "lagran": []byte("&Lscr;"),
+ "lambda": []byte("&#955;"),
+ "langle": []byte("&lang;"),
+ "laquo": []byte("&#171;"),
+ "larrbfs": []byte("&#10527;"),
+ "larrhk": []byte("&#8617;"),
+ "larrlp": []byte("&#8619;"),
+ "larrsim": []byte("&#10611;"),
+ "larrtl": []byte("&#8610;"),
+ "lbrace": []byte("{"),
+ "lbrack": []byte("["),
+ "lbrksld": []byte("&#10639;"),
+ "lbrkslu": []byte("&#10637;"),
+ "lcaron": []byte("&#318;"),
+ "lcedil": []byte("&#316;"),
+ "lcub": []byte("{"),
+ "ldquor": []byte("&#8222;"),
+ "ldrdhar": []byte("&#10599;"),
+ "ldrushar": []byte("&#10571;"),
+ "leftarrow": []byte("&larr;"),
+ "leftarrowtail": []byte("&#8610;"),
+ "leftharpoondown": []byte("&#8637;"),
+ "leftharpoonup": []byte("&#8636;"),
+ "leftleftarrows": []byte("&#8647;"),
+ "leftrightarrow": []byte("&harr;"),
+ "leftrightarrows": []byte("&#8646;"),
+ "leftrightharpoons": []byte("&#8651;"),
+ "leftrightsquigarrow": []byte("&#8621;"),
+ "leftthreetimes": []byte("&#8907;"),
+ "leq": []byte("&le;"),
+ "leqq": []byte("&lE;"),
+ "leqslant": []byte("&les;"),
+ "lesdoto": []byte("&#10881;"),
+ "lesdotor": []byte("&#10883;"),
+ "lessapprox": []byte("&lap;"),
+ "lessdot": []byte("&#8918;"),
+ "lesseqgtr": []byte("&leg;"),
+ "lesseqqgtr": []byte("&lEg;"),
+ "lessgtr": []byte("&lg;"),
+ "lesssim": []byte("&lsim;"),
+ "lfloor": []byte("&#8970;"),
+ "llcorner": []byte("&#8990;"),
+ "lmidot": []byte("&#320;"),
+ "lmoust": []byte("&#9136;"),
+ "lmoustache": []byte("&#9136;"),
+ "lnapprox": []byte("&lnap;"),
+ "lneq": []byte("&lne;"),
+ "lneqq": []byte("&lnE;"),
+ "longleftarrow": []byte("&xlarr;"),
+ "longleftrightarrow": []byte("&xharr;"),
+ "longmapsto": []byte("&xmap;"),
+ "longrightarrow": []byte("&xrarr;"),
+ "looparrowleft": []byte("&#8619;"),
+ "looparrowright": []byte("&#8620;"),
+ "lotimes": []byte("&#10804;"),
+ "lowast": []byte("&#8727;"),
+ "lowbar": []byte("_"),
+ "lozenge": []byte("&loz;"),
+ "lpar": []byte("("),
+ "lrcorner": []byte("&#8991;"),
+ "lsaquo": []byte("&#8249;"),
+ "lsqb": []byte("["),
+ "lsquor": []byte("&#8218;"),
+ "lstrok": []byte("&#322;"),
+ "lt": []byte("<"),
+ "lthree": []byte("&#8907;"),
+ "ltimes": []byte("&#8905;"),
+ "ltquest": []byte("&#10875;"),
+ "lurdshar": []byte("&#10570;"),
+ "luruhar": []byte("&#10598;"),
+ "maltese": []byte("&malt;"),
+ "mapsto": []byte("&map;"),
+ "mapstodown": []byte("&#8615;"),
+ "mapstoleft": []byte("&#8612;"),
+ "mapstoup": []byte("&#8613;"),
+ "marker": []byte("&#9646;"),
+ "measuredangle": []byte("&#8737;"),
+ "micro": []byte("&#181;"),
+ "midast": []byte("*"),
+ "middot": []byte("&#183;"),
+ "minusb": []byte("&#8863;"),
+ "minusd": []byte("&#8760;"),
+ "minusdu": []byte("&#10794;"),
+ "mnplus": []byte("&mp;"),
+ "models": []byte("&#8871;"),
+ "mstpos": []byte("&ac;"),
+ "multimap": []byte("&#8888;"),
+ "nLeftarrow": []byte("&#8653;"),
+ "nLeftrightarrow": []byte("&#8654;"),
+ "nRightarrow": []byte("&#8655;"),
+ "nVDash": []byte("&#8879;"),
+ "nVdash": []byte("&#8878;"),
+ "nabla": []byte("&Del;"),
+ "nacute": []byte("&#324;"),
+ "napos": []byte("&#329;"),
+ "napprox": []byte("&nap;"),
+ "natural": []byte("&#9838;"),
+ "naturals": []byte("&Nopf;"),
+ "ncaron": []byte("&#328;"),
+ "ncedil": []byte("&#326;"),
+ "nearrow": []byte("&#8599;"),
+ "nequiv": []byte("&#8802;"),
+ "nesear": []byte("&toea;"),
+ "nexist": []byte("&#8708;"),
+ "nexists": []byte("&#8708;"),
+ "ngeq": []byte("&nge;"),
+ "ngtr": []byte("&ngt;"),
+ "niv": []byte("&ni;"),
+ "nleftarrow": []byte("&#8602;"),
+ "nleftrightarrow": []byte("&#8622;"),
+ "nleq": []byte("&nle;"),
+ "nless": []byte("&nlt;"),
+ "nltrie": []byte("&#8940;"),
+ "notinva": []byte("&#8713;"),
+ "notinvb": []byte("&#8951;"),
+ "notinvc": []byte("&#8950;"),
+ "notniva": []byte("&#8716;"),
+ "notnivb": []byte("&#8958;"),
+ "notnivc": []byte("&#8957;"),
+ "nparallel": []byte("&npar;"),
+ "npolint": []byte("&#10772;"),
+ "nprcue": []byte("&#8928;"),
+ "nprec": []byte("&npr;"),
+ "nrightarrow": []byte("&#8603;"),
+ "nrtrie": []byte("&#8941;"),
+ "nsccue": []byte("&#8929;"),
+ "nshortmid": []byte("&nmid;"),
+ "nshortparallel": []byte("&npar;"),
+ "nsimeq": []byte("&#8772;"),
+ "nsmid": []byte("&nmid;"),
+ "nspar": []byte("&npar;"),
+ "nsqsube": []byte("&#8930;"),
+ "nsqsupe": []byte("&#8931;"),
+ "nsubseteq": []byte("&#8840;"),
+ "nsucc": []byte("&nsc;"),
+ "nsupseteq": []byte("&#8841;"),
+ "ntilde": []byte("&#241;"),
+ "ntriangleleft": []byte("&#8938;"),
+ "ntrianglelefteq": []byte("&#8940;"),
+ "ntriangleright": []byte("&#8939;"),
+ "ntrianglerighteq": []byte("&#8941;"),
+ "num": []byte("#"),
+ "numero": []byte("&#8470;"),
+ "nvDash": []byte("&#8877;"),
+ "nvdash": []byte("&#8876;"),
+ "nvinfin": []byte("&#10718;"),
+ "nwarrow": []byte("&#8598;"),
+ "oacute": []byte("&#243;"),
+ "ocirc": []byte("&#244;"),
+ "odblac": []byte("&#337;"),
+ "oelig": []byte("&#339;"),
+ "ograve": []byte("&#242;"),
+ "olcross": []byte("&#10683;"),
+ "omacr": []byte("&#333;"),
+ "omega": []byte("&#969;"),
+ "omicron": []byte("&#959;"),
+ "ominus": []byte("&#8854;"),
+ "order": []byte("&oscr;"),
+ "orderof": []byte("&oscr;"),
+ "origof": []byte("&#8886;"),
+ "orslope": []byte("&#10839;"),
+ "oslash": []byte("&#248;"),
+ "otilde": []byte("&#245;"),
+ "otimes": []byte("&#8855;"),
+ "otimesas": []byte("&#10806;"),
+ "parallel": []byte("&par;"),
+ "percnt": []byte("%"),
+ "period": []byte("."),
+ "permil": []byte("&#8240;"),
+ "perp": []byte("&bot;"),
+ "pertenk": []byte("&#8241;"),
+ "phmmat": []byte("&Mscr;"),
+ "pitchfork": []byte("&fork;"),
+ "planck": []byte("&hbar;"),
+ "planckh": []byte("&#8462;"),
+ "plankv": []byte("&hbar;"),
+ "plus": []byte("+"),
+ "plusacir": []byte("&#10787;"),
+ "pluscir": []byte("&#10786;"),
+ "plusdo": []byte("&#8724;"),
+ "plusmn": []byte("&pm;"),
+ "plussim": []byte("&#10790;"),
+ "plustwo": []byte("&#10791;"),
+ "pointint": []byte("&#10773;"),
+ "pound": []byte("&#163;"),
+ "prec": []byte("&pr;"),
+ "precapprox": []byte("&prap;"),
+ "preccurlyeq": []byte("&#8828;"),
+ "preceq": []byte("&pre;"),
+ "precnapprox": []byte("&prnap;"),
+ "precneqq": []byte("&prnE;"),
+ "precnsim": []byte("&#8936;"),
+ "precsim": []byte("&#8830;"),
+ "primes": []byte("&Popf;"),
+ "prnsim": []byte("&#8936;"),
+ "profalar": []byte("&#9006;"),
+ "profline": []byte("&#8978;"),
+ "profsurf": []byte("&#8979;"),
+ "propto": []byte("&prop;"),
+ "prurel": []byte("&#8880;"),
+ "puncsp": []byte("&#8200;"),
+ "qprime": []byte("&#8279;"),
+ "quaternions": []byte("&Hopf;"),
+ "quatint": []byte("&#10774;"),
+ "quest": []byte("?"),
+ "questeq": []byte("&#8799;"),
+ "quot": []byte("\""),
+ "racute": []byte("&#341;"),
+ "radic": []byte("&Sqrt;"),
+ "raemptyv": []byte("&#10675;"),
+ "rangle": []byte("&rang;"),
+ "raquo": []byte("&#187;"),
+ "rarrbfs": []byte("&#10528;"),
+ "rarrhk": []byte("&#8618;"),
+ "rarrlp": []byte("&#8620;"),
+ "rarrsim": []byte("&#10612;"),
+ "rarrtl": []byte("&#8611;"),
+ "rationals": []byte("&Qopf;"),
+ "rbrace": []byte("}"),
+ "rbrack": []byte("]"),
+ "rbrksld": []byte("&#10638;"),
+ "rbrkslu": []byte("&#10640;"),
+ "rcaron": []byte("&#345;"),
+ "rcedil": []byte("&#343;"),
+ "rcub": []byte("}"),
+ "rdldhar": []byte("&#10601;"),
+ "rdquor": []byte("&#8221;"),
+ "real": []byte("&Re;"),
+ "realine": []byte("&Rscr;"),
+ "realpart": []byte("&Re;"),
+ "reals": []byte("&Ropf;"),
+ "rfloor": []byte("&#8971;"),
+ "rightarrow": []byte("&rarr;"),
+ "rightarrowtail": []byte("&#8611;"),
+ "rightharpoondown": []byte("&#8641;"),
+ "rightharpoonup": []byte("&#8640;"),
+ "rightleftarrows": []byte("&#8644;"),
+ "rightleftharpoons": []byte("&#8652;"),
+ "rightrightarrows": []byte("&#8649;"),
+ "rightsquigarrow": []byte("&#8605;"),
+ "rightthreetimes": []byte("&#8908;"),
+ "risingdotseq": []byte("&#8787;"),
+ "rmoust": []byte("&#9137;"),
+ "rmoustache": []byte("&#9137;"),
+ "rotimes": []byte("&#10805;"),
+ "rpar": []byte(")"),
+ "rppolint": []byte("&#10770;"),
+ "rsaquo": []byte("&#8250;"),
+ "rsqb": []byte("]"),
+ "rsquor": []byte("&#8217;"),
+ "rthree": []byte("&#8908;"),
+ "rtimes": []byte("&#8906;"),
+ "rtriltri": []byte("&#10702;"),
+ "ruluhar": []byte("&#10600;"),
+ "sacute": []byte("&#347;"),
+ "scaron": []byte("&#353;"),
+ "scedil": []byte("&#351;"),
+ "scirc": []byte("&#349;"),
+ "scnsim": []byte("&#8937;"),
+ "scpolint": []byte("&#10771;"),
+ "searrow": []byte("&#8600;"),
+ "semi": []byte(";"),
+ "seswar": []byte("&tosa;"),
+ "setminus": []byte("&#8726;"),
+ "sfrown": []byte("&#8994;"),
+ "shchcy": []byte("&#1097;"),
+ "shortmid": []byte("&mid;"),
+ "shortparallel": []byte("&par;"),
+ "sigma": []byte("&#963;"),
+ "sigmaf": []byte("&#962;"),
+ "sigmav": []byte("&#962;"),
+ "simeq": []byte("&sime;"),
+ "simplus": []byte("&#10788;"),
+ "simrarr": []byte("&#10610;"),
+ "slarr": []byte("&larr;"),
+ "smallsetminus": []byte("&#8726;"),
+ "smeparsl": []byte("&#10724;"),
+ "smid": []byte("&mid;"),
+ "softcy": []byte("&#1100;"),
+ "sol": []byte("/"),
+ "solbar": []byte("&#9023;"),
+ "spades": []byte("&#9824;"),
+ "spadesuit": []byte("&#9824;"),
+ "spar": []byte("&par;"),
+ "sqsube": []byte("&#8849;"),
+ "sqsubset": []byte("&#8847;"),
+ "sqsubseteq": []byte("&#8849;"),
+ "sqsupe": []byte("&#8850;"),
+ "sqsupset": []byte("&#8848;"),
+ "sqsupseteq": []byte("&#8850;"),
+ "square": []byte("&squ;"),
+ "squarf": []byte("&squf;"),
+ "srarr": []byte("&rarr;"),
+ "ssetmn": []byte("&#8726;"),
+ "ssmile": []byte("&#8995;"),
+ "sstarf": []byte("&Star;"),
+ "straightepsilon": []byte("&#1013;"),
+ "straightphi": []byte("&#981;"),
+ "strns": []byte("&#175;"),
+ "subedot": []byte("&#10947;"),
+ "submult": []byte("&#10945;"),
+ "subplus": []byte("&#10943;"),
+ "subrarr": []byte("&#10617;"),
+ "subset": []byte("&sub;"),
+ "subseteq": []byte("&sube;"),
+ "subseteqq": []byte("&subE;"),
+ "subsetneq": []byte("&#8842;"),
+ "subsetneqq": []byte("&subnE;"),
+ "succ": []byte("&sc;"),
+ "succapprox": []byte("&scap;"),
+ "succcurlyeq": []byte("&#8829;"),
+ "succeq": []byte("&sce;"),
+ "succnapprox": []byte("&scnap;"),
+ "succneqq": []byte("&scnE;"),
+ "succnsim": []byte("&#8937;"),
+ "succsim": []byte("&#8831;"),
+ "supdsub": []byte("&#10968;"),
+ "supedot": []byte("&#10948;"),
+ "suphsol": []byte("&#10185;"),
+ "suphsub": []byte("&#10967;"),
+ "suplarr": []byte("&#10619;"),
+ "supmult": []byte("&#10946;"),
+ "supplus": []byte("&#10944;"),
+ "supset": []byte("&sup;"),
+ "supseteq": []byte("&supe;"),
+ "supseteqq": []byte("&supE;"),
+ "supsetneq": []byte("&#8843;"),
+ "supsetneqq": []byte("&supnE;"),
+ "swarrow": []byte("&#8601;"),
+ "szlig": []byte("&#223;"),
+ "target": []byte("&#8982;"),
+ "tcaron": []byte("&#357;"),
+ "tcedil": []byte("&#355;"),
+ "telrec": []byte("&#8981;"),
+ "there4": []byte("&#8756;"),
+ "therefore": []byte("&#8756;"),
+ "theta": []byte("&#952;"),
+ "thetasym": []byte("&#977;"),
+ "thetav": []byte("&#977;"),
+ "thickapprox": []byte("&ap;"),
+ "thicksim": []byte("&sim;"),
+ "thinsp": []byte("&#8201;"),
+ "thkap": []byte("&ap;"),
+ "thksim": []byte("&sim;"),
+ "thorn": []byte("&#254;"),
+ "tilde": []byte("&#732;"),
+ "times": []byte("&#215;"),
+ "timesb": []byte("&#8864;"),
+ "timesbar": []byte("&#10801;"),
+ "topbot": []byte("&#9014;"),
+ "topfork": []byte("&#10970;"),
+ "tprime": []byte("&#8244;"),
+ "triangle": []byte("&utri;"),
+ "triangledown": []byte("&dtri;"),
+ "triangleleft": []byte("&ltri;"),
+ "trianglelefteq": []byte("&#8884;"),
+ "triangleq": []byte("&trie;"),
+ "triangleright": []byte("&rtri;"),
+ "trianglerighteq": []byte("&#8885;"),
+ "tridot": []byte("&#9708;"),
+ "triminus": []byte("&#10810;"),
+ "triplus": []byte("&#10809;"),
+ "tritime": []byte("&#10811;"),
+ "trpezium": []byte("&#9186;"),
+ "tstrok": []byte("&#359;"),
+ "twoheadleftarrow": []byte("&Larr;"),
+ "twoheadrightarrow": []byte("&Rarr;"),
+ "uacute": []byte("&#250;"),
+ "ubreve": []byte("&#365;"),
+ "ucirc": []byte("&#251;"),
+ "udblac": []byte("&#369;"),
+ "ugrave": []byte("&#249;"),
+ "ulcorn": []byte("&#8988;"),
+ "ulcorner": []byte("&#8988;"),
+ "ulcrop": []byte("&#8975;"),
+ "umacr": []byte("&#363;"),
+ "uogon": []byte("&#371;"),
+ "uparrow": []byte("&uarr;"),
+ "updownarrow": []byte("&varr;"),
+ "upharpoonleft": []byte("&#8639;"),
+ "upharpoonright": []byte("&#8638;"),
+ "upsih": []byte("&#978;"),
+ "upsilon": []byte("&#965;"),
+ "upuparrows": []byte("&#8648;"),
+ "urcorn": []byte("&#8989;"),
+ "urcorner": []byte("&#8989;"),
+ "urcrop": []byte("&#8974;"),
+ "uring": []byte("&#367;"),
+ "utilde": []byte("&#361;"),
+ "uwangle": []byte("&#10663;"),
+ "varepsilon": []byte("&#1013;"),
+ "varkappa": []byte("&#1008;"),
+ "varnothing": []byte("&#8709;"),
+ "varphi": []byte("&#981;"),
+ "varpi": []byte("&piv;"),
+ "varpropto": []byte("&prop;"),
+ "varrho": []byte("&rhov;"),
+ "varsigma": []byte("&#962;"),
+ "vartheta": []byte("&#977;"),
+ "vartriangleleft": []byte("&#8882;"),
+ "vartriangleright": []byte("&#8883;"),
+ "vee": []byte("&or;"),
+ "veebar": []byte("&#8891;"),
+ "vellip": []byte("&#8942;"),
+ "verbar": []byte("|"),
+ "vert": []byte("|"),
+ "vprop": []byte("&prop;"),
+ "vzigzag": []byte("&#10650;"),
+ "wcirc": []byte("&#373;"),
+ "wedge": []byte("&and;"),
+ "wedgeq": []byte("&#8793;"),
+ "weierp": []byte("&wp;"),
+ "wreath": []byte("&wr;"),
+ "xvee": []byte("&Vee;"),
+ "xwedge": []byte("&#8896;"),
+ "yacute": []byte("&#253;"),
+ "ycirc": []byte("&#375;"),
+ "zacute": []byte("&#378;"),
+ "zcaron": []byte("&#382;"),
+ "zeetrf": []byte("&Zfr;"),
+ "zigrarr": []byte("&#8669;"),
+}
+
+// TextRevEntitiesMap is a map of escapes.
+var TextRevEntitiesMap = map[byte][]byte{
+ '<': []byte("&lt;"),
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/js/js.go b/vendor/github.com/tdewolff/minify/v2/js/js.go
new file mode 100644
index 0000000..1f6cefe
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/js/js.go
@@ -0,0 +1,1277 @@
+// Package js minifies ECMAScript 2021 following the language specification at https://tc39.es/ecma262/.
+package js
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/js"
+)
+
+type blockType int
+
+const (
+ defaultBlock blockType = iota
+ functionBlock
+ iterationBlock
+)
+
+// Minifier is a JS minifier.
+type Minifier struct {
+ Precision int // number of significant digits
+ KeepVarNames bool
+ useAlphabetVarNames bool
+ Version int
+}
+
+func (o *Minifier) minVersion(version int) bool {
+ return o.Version == 0 || version <= o.Version
+}
+
+// Minify minifies JS data, it reads from r and writes to w.
+func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
+ return (&Minifier{}).Minify(m, w, r, params)
+}
+
+// Minify minifies JS data, it reads from r and writes to w.
+func (o *Minifier) Minify(_ *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
+ z := parse.NewInput(r)
+ ast, err := js.Parse(z, js.Options{WhileToFor: true})
+ if err != nil {
+ return err
+ }
+
+ // license comments
+ for _, comment := range ast.Comments {
+ if 3 < len(comment) && comment[2] == '!' {
+ w.Write(comment)
+ if comment[1] == '/' {
+ w.Write(newlineBytes)
+ }
+ } else if 2 < len(comment) && comment[0] == '#' && comment[1] == '!' {
+ w.Write(comment)
+ }
+ }
+
+ m := &jsMinifier{
+ o: o,
+ w: w,
+ renamer: newRenamer(!o.KeepVarNames, !o.useAlphabetVarNames),
+ }
+ m.hoistVars(&ast.BlockStmt)
+ ast.List = optimizeStmtList(ast.List, functionBlock)
+ for _, item := range ast.List {
+ m.writeSemicolon()
+ m.minifyStmt(item)
+ }
+
+ if _, err := w.Write(nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+type expectExpr int
+
+const (
+ expectAny expectExpr = iota
+ expectExprStmt // in statement
+ expectExprBody // in arrow function body
+)
+
+type jsMinifier struct {
+ o *Minifier
+ w io.Writer
+
+ prev []byte
+ needsSemicolon bool // write a semicolon if required
+ needsSpace bool // write a space if next token is an identifier
+ expectExpr expectExpr // avoid ambiguous syntax such as an expression starting with function
+ groupedStmt bool // avoid ambiguous syntax by grouping the expression statement
+ inFor bool
+ spaceBefore byte
+
+ renamer *renamer
+}
+
+func (m *jsMinifier) write(b []byte) {
+ // 0 < len(b)
+ if m.needsSpace && js.IsIdentifierContinue(b) || m.spaceBefore == b[0] {
+ m.w.Write(spaceBytes)
+ }
+ m.w.Write(b)
+ m.prev = b
+ m.needsSpace = false
+ m.expectExpr = expectAny
+ m.spaceBefore = 0
+}
+
+func (m *jsMinifier) writeSpaceAfterIdent() {
+ // space after identifier and after regular expression (to prevent confusion with its tag)
+ if js.IsIdentifierEnd(m.prev) || 1 < len(m.prev) && m.prev[0] == '/' {
+ m.w.Write(spaceBytes)
+ }
+}
+
+func (m *jsMinifier) writeSpaceBeforeIdent() {
+ m.needsSpace = true
+}
+
+func (m *jsMinifier) writeSpaceBefore(c byte) {
+ m.spaceBefore = c
+}
+
+func (m *jsMinifier) requireSemicolon() {
+ m.needsSemicolon = true
+}
+
+func (m *jsMinifier) writeSemicolon() {
+ if m.needsSemicolon {
+ m.w.Write(semicolonBytes)
+ m.needsSemicolon = false
+ m.needsSpace = false
+ }
+}
+
+func (m *jsMinifier) minifyStmt(i js.IStmt) {
+ switch stmt := i.(type) {
+ case *js.ExprStmt:
+ m.expectExpr = expectExprStmt
+ m.minifyExpr(stmt.Value, js.OpExpr)
+ if m.groupedStmt {
+ m.write(closeParenBytes)
+ m.groupedStmt = false
+ }
+ m.requireSemicolon()
+ case *js.VarDecl:
+ m.minifyVarDecl(stmt, false)
+ m.requireSemicolon()
+ case *js.IfStmt:
+ hasIf := !isEmptyStmt(stmt.Body)
+ hasElse := !isEmptyStmt(stmt.Else)
+ if !hasIf && !hasElse {
+ break
+ }
+
+ m.write(ifOpenBytes)
+ m.minifyExpr(stmt.Cond, js.OpExpr)
+ m.write(closeParenBytes)
+
+ if !hasIf && hasElse {
+ m.requireSemicolon()
+ } else if hasIf {
+ if hasElse && endsInIf(stmt.Body) {
+ // prevent: if(a){if(b)c}else d; => if(a)if(b)c;else d;
+ m.write(openBraceBytes)
+ m.minifyStmt(stmt.Body)
+ m.write(closeBraceBytes)
+ m.needsSemicolon = false
+ } else {
+ m.minifyStmt(stmt.Body)
+ }
+ }
+ if hasElse {
+ m.writeSemicolon()
+ m.write(elseBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyStmt(stmt.Else)
+ }
+ case *js.BlockStmt:
+ m.renamer.renameScope(stmt.Scope)
+ m.minifyBlockStmt(stmt)
+ case *js.ReturnStmt:
+ m.write(returnBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(stmt.Value, js.OpExpr)
+ m.requireSemicolon()
+ case *js.LabelledStmt:
+ m.write(stmt.Label)
+ m.write(colonBytes)
+ m.minifyStmtOrBlock(stmt.Value, defaultBlock)
+ case *js.BranchStmt:
+ m.write(stmt.Type.Bytes())
+ if stmt.Label != nil {
+ m.write(spaceBytes)
+ m.write(stmt.Label)
+ }
+ m.requireSemicolon()
+ case *js.WithStmt:
+ m.write(withOpenBytes)
+ m.minifyExpr(stmt.Cond, js.OpExpr)
+ m.write(closeParenBytes)
+ m.minifyStmtOrBlock(stmt.Body, defaultBlock)
+ case *js.DoWhileStmt:
+ m.write(doBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyStmtOrBlock(stmt.Body, iterationBlock)
+ m.writeSemicolon()
+ m.write(whileOpenBytes)
+ m.minifyExpr(stmt.Cond, js.OpExpr)
+ m.write(closeParenBytes)
+ case *js.WhileStmt:
+ m.write(whileOpenBytes)
+ m.minifyExpr(stmt.Cond, js.OpExpr)
+ m.write(closeParenBytes)
+ m.minifyStmtOrBlock(stmt.Body, iterationBlock)
+ case *js.ForStmt:
+ stmt.Body.List = optimizeStmtList(stmt.Body.List, iterationBlock)
+ m.renamer.renameScope(stmt.Body.Scope)
+ m.write(forOpenBytes)
+ m.inFor = true
+ if decl, ok := stmt.Init.(*js.VarDecl); ok {
+ m.minifyVarDecl(decl, true)
+ } else {
+ m.minifyExpr(stmt.Init, js.OpLHS)
+ }
+ m.inFor = false
+ m.write(semicolonBytes)
+ m.minifyExpr(stmt.Cond, js.OpExpr)
+ m.write(semicolonBytes)
+ m.minifyExpr(stmt.Post, js.OpExpr)
+ m.write(closeParenBytes)
+ m.minifyBlockAsStmt(stmt.Body)
+ case *js.ForInStmt:
+ stmt.Body.List = optimizeStmtList(stmt.Body.List, iterationBlock)
+ m.renamer.renameScope(stmt.Body.Scope)
+ m.write(forOpenBytes)
+ m.inFor = true
+ if decl, ok := stmt.Init.(*js.VarDecl); ok {
+ m.minifyVarDecl(decl, false)
+ } else {
+ m.minifyExpr(stmt.Init, js.OpLHS)
+ }
+ m.inFor = false
+ m.writeSpaceAfterIdent()
+ m.write(inBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(stmt.Value, js.OpExpr)
+ m.write(closeParenBytes)
+ m.minifyBlockAsStmt(stmt.Body)
+ case *js.ForOfStmt:
+ stmt.Body.List = optimizeStmtList(stmt.Body.List, iterationBlock)
+ m.renamer.renameScope(stmt.Body.Scope)
+ if stmt.Await {
+ m.write(forAwaitOpenBytes)
+ } else {
+ m.write(forOpenBytes)
+ }
+ m.inFor = true
+ if decl, ok := stmt.Init.(*js.VarDecl); ok {
+ m.minifyVarDecl(decl, false)
+ } else {
+ m.minifyExpr(stmt.Init, js.OpLHS)
+ }
+ m.inFor = false
+ m.writeSpaceAfterIdent()
+ m.write(ofBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(stmt.Value, js.OpAssign)
+ m.write(closeParenBytes)
+ m.minifyBlockAsStmt(stmt.Body)
+ case *js.SwitchStmt:
+ m.write(switchOpenBytes)
+ m.minifyExpr(stmt.Init, js.OpExpr)
+ m.write(closeParenOpenBracketBytes)
+ m.needsSemicolon = false
+ for i, _ := range stmt.List {
+ stmt.List[i].List = optimizeStmtList(stmt.List[i].List, defaultBlock)
+ }
+ m.renamer.renameScope(stmt.Scope)
+ for _, clause := range stmt.List {
+ m.writeSemicolon()
+ m.write(clause.TokenType.Bytes())
+ if clause.Cond != nil {
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(clause.Cond, js.OpExpr)
+ }
+ m.write(colonBytes)
+ for _, item := range clause.List {
+ m.writeSemicolon()
+ m.minifyStmt(item)
+ }
+ }
+ m.write(closeBraceBytes)
+ m.needsSemicolon = false
+ case *js.ThrowStmt:
+ m.write(throwBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(stmt.Value, js.OpExpr)
+ m.requireSemicolon()
+ case *js.TryStmt:
+ m.write(tryBytes)
+ stmt.Body.List = optimizeStmtList(stmt.Body.List, defaultBlock)
+ m.renamer.renameScope(stmt.Body.Scope)
+ m.minifyBlockStmt(stmt.Body)
+ if stmt.Catch != nil {
+ m.write(catchBytes)
+ stmt.Catch.List = optimizeStmtList(stmt.Catch.List, defaultBlock)
+ if v, ok := stmt.Binding.(*js.Var); ok && v.Uses == 1 && m.o.minVersion(2019) {
+ stmt.Catch.Scope.Declared = stmt.Catch.Scope.Declared[1:]
+ stmt.Binding = nil
+ }
+ m.renamer.renameScope(stmt.Catch.Scope)
+ if stmt.Binding != nil {
+ m.write(openParenBytes)
+ m.minifyBinding(stmt.Binding)
+ m.write(closeParenBytes)
+ }
+ m.minifyBlockStmt(stmt.Catch)
+ }
+ if stmt.Finally != nil {
+ m.write(finallyBytes)
+ stmt.Finally.List = optimizeStmtList(stmt.Finally.List, defaultBlock)
+ m.renamer.renameScope(stmt.Finally.Scope)
+ m.minifyBlockStmt(stmt.Finally)
+ }
+ case *js.FuncDecl:
+ m.minifyFuncDecl(stmt, false)
+ case *js.ClassDecl:
+ m.minifyClassDecl(stmt)
+ case *js.DebuggerStmt:
+ m.write(debuggerBytes)
+ m.requireSemicolon()
+ case *js.EmptyStmt:
+ case *js.ImportStmt:
+ m.write(importBytes)
+ if stmt.Default != nil {
+ m.write(spaceBytes)
+ m.write(stmt.Default)
+ if len(stmt.List) != 0 {
+ m.write(commaBytes)
+ } else if stmt.Default != nil || len(stmt.List) != 0 {
+ m.write(spaceBytes)
+ }
+ }
+ if len(stmt.List) == 1 && len(stmt.List[0].Name) == 1 && stmt.List[0].Name[0] == '*' {
+ m.writeSpaceBeforeIdent()
+ m.minifyAlias(stmt.List[0])
+ if stmt.Default != nil || len(stmt.List) != 0 {
+ m.write(spaceBytes)
+ }
+ } else if 0 < len(stmt.List) {
+ m.write(openBraceBytes)
+ for i, item := range stmt.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyAlias(item)
+ }
+ m.write(closeBraceBytes)
+ }
+ if stmt.Default != nil || len(stmt.List) != 0 {
+ m.write(fromBytes)
+ }
+ m.write(minifyString(stmt.Module, false))
+ m.requireSemicolon()
+ case *js.ExportStmt:
+ m.write(exportBytes)
+ if stmt.Decl != nil {
+ if stmt.Default {
+ m.write(spaceDefaultBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(stmt.Decl, js.OpAssign)
+ _, isHoistable := stmt.Decl.(*js.FuncDecl)
+ _, isClass := stmt.Decl.(*js.ClassDecl)
+ if !isHoistable && !isClass {
+ m.requireSemicolon()
+ }
+ } else {
+ m.writeSpaceBeforeIdent()
+ m.minifyStmt(stmt.Decl.(js.IStmt)) // can only be variable, function, or class decl
+ }
+ } else {
+ if len(stmt.List) == 1 && (len(stmt.List[0].Name) == 1 && stmt.List[0].Name[0] == '*' || stmt.List[0].Name == nil && len(stmt.List[0].Binding) == 1 && stmt.List[0].Binding[0] == '*') {
+ m.writeSpaceBeforeIdent()
+ m.minifyAlias(stmt.List[0])
+ if stmt.Module != nil && stmt.List[0].Name != nil {
+ m.write(spaceBytes)
+ }
+ } else if 0 < len(stmt.List) {
+ m.write(openBraceBytes)
+ for i, item := range stmt.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyAlias(item)
+ }
+ m.write(closeBraceBytes)
+ }
+ if stmt.Module != nil {
+ m.write(fromBytes)
+ m.write(minifyString(stmt.Module, false))
+ }
+ m.requireSemicolon()
+ }
+ case *js.DirectivePrologueStmt:
+ stmt.Value[0] = '"'
+ stmt.Value[len(stmt.Value)-1] = '"'
+ m.write(stmt.Value)
+ m.requireSemicolon()
+ }
+}
+
+func (m *jsMinifier) minifyBlockStmt(stmt *js.BlockStmt) {
+ m.write(openBraceBytes)
+ m.needsSemicolon = false
+ for _, item := range stmt.List {
+ m.writeSemicolon()
+ m.minifyStmt(item)
+ }
+ m.write(closeBraceBytes)
+ m.needsSemicolon = false
+}
+
+func (m *jsMinifier) minifyBlockAsStmt(blockStmt *js.BlockStmt) {
+ // minify block when statement is expected, i.e. semicolon if empty or remove braces for single statement
+ // assume we already renamed the scope
+ hasLexicalVars := false
+ for _, v := range blockStmt.Scope.Declared[blockStmt.Scope.NumForDecls:] {
+ if v.Decl == js.LexicalDecl {
+ hasLexicalVars = true
+ break
+ }
+ }
+ if 1 < len(blockStmt.List) || hasLexicalVars {
+ m.minifyBlockStmt(blockStmt)
+ } else if len(blockStmt.List) == 1 {
+ m.minifyStmt(blockStmt.List[0])
+ } else {
+ m.write(semicolonBytes)
+ m.needsSemicolon = false
+ }
+}
+
+func (m *jsMinifier) minifyStmtOrBlock(i js.IStmt, blockType blockType) {
+ // minify stmt or a block
+ if blockStmt, ok := i.(*js.BlockStmt); ok {
+ blockStmt.List = optimizeStmtList(blockStmt.List, blockType)
+ m.renamer.renameScope(blockStmt.Scope)
+ m.minifyBlockAsStmt(blockStmt)
+ } else {
+ // optimizeStmtList can in some cases expand one stmt to two shorter stmts
+ list := optimizeStmtList([]js.IStmt{i}, blockType)
+ if len(list) == 1 {
+ m.minifyStmt(list[0])
+ } else if len(list) == 0 {
+ m.write(semicolonBytes)
+ m.needsSemicolon = false
+ } else {
+ m.minifyBlockStmt(&js.BlockStmt{List: list, Scope: js.Scope{}})
+ }
+ }
+}
+
+func (m *jsMinifier) minifyAlias(alias js.Alias) {
+ if alias.Name != nil {
+ if alias.Name[0] == '"' || alias.Name[0] == '\'' {
+ m.write(minifyString(alias.Name, false))
+ } else {
+ m.write(alias.Name)
+ }
+ if !bytes.Equal(alias.Name, starBytes) {
+ m.write(spaceBytes)
+ }
+ m.write(asSpaceBytes)
+ }
+ if alias.Binding != nil {
+ if alias.Binding[0] == '"' || alias.Binding[0] == '\'' {
+ m.write(minifyString(alias.Binding, false))
+ } else {
+ m.write(alias.Binding)
+ }
+ }
+}
+
+func (m *jsMinifier) minifyParams(params js.Params, removeUnused bool) {
+ // remove unused parameters from the end
+ j := len(params.List)
+ if removeUnused && params.Rest == nil {
+ for ; 0 < j; j-- {
+ if v, ok := params.List[j-1].Binding.(*js.Var); !ok || ok && 1 < v.Uses {
+ break
+ }
+ }
+ }
+
+ m.write(openParenBytes)
+ for i, item := range params.List[:j] {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyBindingElement(item)
+ }
+ if params.Rest != nil {
+ if len(params.List) != 0 {
+ m.write(commaBytes)
+ }
+ m.write(ellipsisBytes)
+ m.minifyBinding(params.Rest)
+ }
+ m.write(closeParenBytes)
+}
+
+func (m *jsMinifier) minifyArguments(args js.Args) {
+ m.write(openParenBytes)
+ for i, item := range args.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ if item.Rest {
+ m.write(ellipsisBytes)
+ }
+ m.minifyExpr(item.Value, js.OpAssign)
+ }
+ m.write(closeParenBytes)
+}
+
+func (m *jsMinifier) minifyVarDecl(decl *js.VarDecl, onlyDefines bool) {
+ if len(decl.List) == 0 {
+ return
+ } else if decl.TokenType == js.ErrorToken {
+ // remove 'var' when hoisting variables
+ first := true
+ for _, item := range decl.List {
+ if item.Default != nil || !onlyDefines {
+ if !first {
+ m.write(commaBytes)
+ }
+ m.minifyBindingElement(item)
+ first = false
+ }
+ }
+ } else {
+ if decl.TokenType == js.VarToken && len(decl.List) <= 10000 {
+ // move single var decls forward and order for GZIP optimization
+ start := 0
+ if _, ok := decl.List[0].Binding.(*js.Var); !ok {
+ start++
+ }
+ for i := 0; i < len(decl.List); i++ {
+ item := decl.List[i]
+ if v, ok := item.Binding.(*js.Var); ok && item.Default == nil && len(v.Data) == 1 {
+ for j := start; j < len(decl.List); j++ {
+ if v2, ok := decl.List[j].Binding.(*js.Var); ok && decl.List[j].Default == nil && len(v2.Data) == 1 {
+ if m.renamer.identOrder[v2.Data[0]] < m.renamer.identOrder[v.Data[0]] {
+ continue
+ } else if m.renamer.identOrder[v2.Data[0]] == m.renamer.identOrder[v.Data[0]] {
+ break
+ }
+ }
+ decl.List = append(decl.List[:i], decl.List[i+1:]...)
+ decl.List = append(decl.List[:j], append([]js.BindingElement{item}, decl.List[j:]...)...)
+ break
+ }
+ }
+ }
+ }
+
+ m.write(decl.TokenType.Bytes())
+ m.writeSpaceBeforeIdent()
+ for i, item := range decl.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyBindingElement(item)
+ }
+ }
+}
+
+func (m *jsMinifier) minifyFuncDecl(decl *js.FuncDecl, inExpr bool) {
+ parentRename := m.renamer.rename
+ m.renamer.rename = !decl.Body.Scope.HasWith && !m.o.KeepVarNames
+ m.hoistVars(&decl.Body)
+ decl.Body.List = optimizeStmtList(decl.Body.List, functionBlock)
+
+ if decl.Async {
+ m.write(asyncSpaceBytes)
+ }
+ m.write(functionBytes)
+ if decl.Generator {
+ m.write(starBytes)
+ }
+
+ // TODO: remove function name, really necessary?
+ //if decl.Name != nil && decl.Name.Uses == 1 {
+ // scope := decl.Body.Scope
+ // for i, vorig := range scope.Declared {
+ // if decl.Name == vorig {
+ // scope.Declared = append(scope.Declared[:i], scope.Declared[i+1:]...)
+ // }
+ // }
+ //}
+
+ if inExpr {
+ m.renamer.renameScope(decl.Body.Scope)
+ }
+ if decl.Name != nil && (!inExpr || 1 < decl.Name.Uses) {
+ if !decl.Generator {
+ m.write(spaceBytes)
+ }
+ m.write(decl.Name.Data)
+ }
+ if !inExpr {
+ m.renamer.renameScope(decl.Body.Scope)
+ }
+
+ m.minifyParams(decl.Params, true)
+ m.minifyBlockStmt(&decl.Body)
+ m.renamer.rename = parentRename
+}
+
+func (m *jsMinifier) minifyMethodDecl(decl *js.MethodDecl) {
+ parentRename := m.renamer.rename
+ m.renamer.rename = !decl.Body.Scope.HasWith && !m.o.KeepVarNames
+ m.hoistVars(&decl.Body)
+ decl.Body.List = optimizeStmtList(decl.Body.List, functionBlock)
+
+ if decl.Static {
+ m.write(staticBytes)
+ m.writeSpaceBeforeIdent()
+ }
+ if decl.Async {
+ m.write(asyncBytes)
+ if decl.Generator {
+ m.write(starBytes)
+ } else {
+ m.writeSpaceBeforeIdent()
+ }
+ } else if decl.Generator {
+ m.write(starBytes)
+ } else if decl.Get {
+ m.write(getBytes)
+ m.writeSpaceBeforeIdent()
+ } else if decl.Set {
+ m.write(setBytes)
+ m.writeSpaceBeforeIdent()
+ }
+ m.minifyPropertyName(decl.Name)
+ m.renamer.renameScope(decl.Body.Scope)
+ m.minifyParams(decl.Params, !decl.Set)
+ m.minifyBlockStmt(&decl.Body)
+ m.renamer.rename = parentRename
+}
+
+func (m *jsMinifier) minifyArrowFunc(decl *js.ArrowFunc) {
+ parentRename := m.renamer.rename
+ m.renamer.rename = !decl.Body.Scope.HasWith && !m.o.KeepVarNames
+ m.hoistVars(&decl.Body)
+ decl.Body.List = optimizeStmtList(decl.Body.List, functionBlock)
+
+ m.renamer.renameScope(decl.Body.Scope)
+ if decl.Async {
+ m.write(asyncBytes)
+ }
+ removeParens := false
+ if decl.Params.Rest == nil && len(decl.Params.List) == 1 && decl.Params.List[0].Default == nil {
+ if decl.Params.List[0].Binding == nil {
+ removeParens = true
+ } else if _, ok := decl.Params.List[0].Binding.(*js.Var); ok {
+ removeParens = true
+ }
+ }
+ if removeParens {
+ if decl.Async && decl.Params.List[0].Binding != nil {
+ // add space after async in: async a => ...
+ m.write(spaceBytes)
+ }
+ m.minifyBindingElement(decl.Params.List[0])
+ } else {
+ parentInFor := m.inFor
+ m.inFor = false
+ m.minifyParams(decl.Params, true)
+ m.inFor = parentInFor
+ }
+ m.write(arrowBytes)
+ removeBraces := false
+ if 0 < len(decl.Body.List) {
+ returnStmt, isReturn := decl.Body.List[len(decl.Body.List)-1].(*js.ReturnStmt)
+ if isReturn && returnStmt.Value != nil {
+ // merge expression statements to final return statement, remove function body braces
+ var list []js.IExpr
+ removeBraces = true
+ for _, item := range decl.Body.List[:len(decl.Body.List)-1] {
+ if expr, isExpr := item.(*js.ExprStmt); isExpr {
+ list = append(list, expr.Value)
+ } else {
+ removeBraces = false
+ break
+ }
+ }
+ if removeBraces {
+ list = append(list, returnStmt.Value)
+ expr := list[0]
+ if 0 < len(list) {
+ if 1 < len(list) {
+ expr = &js.CommaExpr{list}
+ }
+ expr = &js.GroupExpr{X: expr}
+ }
+ m.expectExpr = expectExprBody
+ m.minifyExpr(expr, js.OpAssign)
+ if m.groupedStmt {
+ m.write(closeParenBytes)
+ m.groupedStmt = false
+ }
+ }
+ } else if isReturn && returnStmt.Value == nil {
+ // remove empty return
+ decl.Body.List = decl.Body.List[:len(decl.Body.List)-1]
+ }
+ }
+ if !removeBraces {
+ m.minifyBlockStmt(&decl.Body)
+ }
+ m.renamer.rename = parentRename
+}
+
+func (m *jsMinifier) minifyClassDecl(decl *js.ClassDecl) {
+ m.write(classBytes)
+ if decl.Name != nil {
+ m.write(spaceBytes)
+ m.write(decl.Name.Data)
+ }
+ if decl.Extends != nil {
+ m.write(spaceExtendsBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(decl.Extends, js.OpLHS)
+ }
+ m.write(openBraceBytes)
+ m.needsSemicolon = false
+ for _, item := range decl.List {
+ m.writeSemicolon()
+ if item.StaticBlock != nil {
+ m.write(staticBytes)
+ m.minifyBlockStmt(item.StaticBlock)
+ } else if item.Method != nil {
+ m.minifyMethodDecl(item.Method)
+ } else {
+ if item.Static {
+ m.write(staticBytes)
+ if !item.Name.IsComputed() && item.Name.Literal.TokenType == js.IdentifierToken {
+ m.write(spaceBytes)
+ }
+ }
+ m.minifyPropertyName(item.Name)
+ if item.Init != nil {
+ m.write(equalBytes)
+ m.minifyExpr(item.Init, js.OpAssign)
+ }
+ m.requireSemicolon()
+ }
+ }
+ m.write(closeBraceBytes)
+ m.needsSemicolon = false
+}
+
+func (m *jsMinifier) minifyPropertyName(name js.PropertyName) {
+ if name.IsComputed() {
+ m.write(openBracketBytes)
+ m.minifyExpr(name.Computed, js.OpAssign)
+ m.write(closeBracketBytes)
+ } else if name.Literal.TokenType == js.StringToken {
+ m.write(minifyString(name.Literal.Data, false))
+ } else {
+ m.write(name.Literal.Data)
+ }
+}
+
+func (m *jsMinifier) minifyProperty(property js.Property) {
+ // property.Name is always set in ObjectLiteral
+ if property.Spread {
+ m.write(ellipsisBytes)
+ } else if v, ok := property.Value.(*js.Var); property.Name != nil && (!ok || !property.Name.IsIdent(v.Name())) {
+ // add 'old-name:' before BindingName as the latter will be renamed
+ m.minifyPropertyName(*property.Name)
+ m.write(colonBytes)
+ }
+ m.minifyExpr(property.Value, js.OpAssign)
+ if property.Init != nil {
+ m.write(equalBytes)
+ m.minifyExpr(property.Init, js.OpAssign)
+ }
+}
+
+func (m *jsMinifier) minifyBindingElement(element js.BindingElement) {
+ if element.Binding != nil {
+ parentInFor := m.inFor
+ m.inFor = false
+ m.minifyBinding(element.Binding)
+ m.inFor = parentInFor
+ if element.Default != nil {
+ m.write(equalBytes)
+ m.minifyExpr(element.Default, js.OpAssign)
+ }
+ }
+}
+
+func (m *jsMinifier) minifyBinding(ibinding js.IBinding) {
+ switch binding := ibinding.(type) {
+ case *js.Var:
+ m.write(binding.Data)
+ case *js.BindingArray:
+ m.write(openBracketBytes)
+ for i, item := range binding.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyBindingElement(item)
+ }
+ if binding.Rest != nil {
+ if 0 < len(binding.List) {
+ m.write(commaBytes)
+ }
+ m.write(ellipsisBytes)
+ m.minifyBinding(binding.Rest)
+ }
+ m.write(closeBracketBytes)
+ case *js.BindingObject:
+ m.write(openBraceBytes)
+ for i, item := range binding.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ // item.Key is always set
+ if item.Key.IsComputed() {
+ m.minifyPropertyName(*item.Key)
+ m.write(colonBytes)
+ } else if v, ok := item.Value.Binding.(*js.Var); !ok || !item.Key.IsIdent(v.Data) {
+ // add 'old-name:' before BindingName as the latter will be renamed
+ m.minifyPropertyName(*item.Key)
+ m.write(colonBytes)
+ }
+ m.minifyBindingElement(item.Value)
+ }
+ if binding.Rest != nil {
+ if 0 < len(binding.List) {
+ m.write(commaBytes)
+ }
+ m.write(ellipsisBytes)
+ m.write(binding.Rest.Data)
+ }
+ m.write(closeBraceBytes)
+ }
+}
+
+func (m *jsMinifier) minifyExpr(i js.IExpr, prec js.OpPrec) {
+ if cond, ok := i.(*js.CondExpr); ok {
+ i = m.optimizeCondExpr(cond, prec)
+ } else if unary, ok := i.(*js.UnaryExpr); ok {
+ i = optimizeUnaryExpr(unary, prec)
+ }
+
+ switch expr := i.(type) {
+ case *js.Var:
+ for expr.Link != nil {
+ expr = expr.Link
+ }
+ data := expr.Data
+ if bytes.Equal(data, undefinedBytes) { // TODO: only if not defined
+ if js.OpUnary < prec {
+ m.write(groupedVoidZeroBytes)
+ } else {
+ m.write(voidZeroBytes)
+ }
+ } else if bytes.Equal(data, infinityBytes) { // TODO: only if not defined
+ if js.OpMul < prec {
+ m.write(groupedOneDivZeroBytes)
+ } else {
+ m.write(oneDivZeroBytes)
+ }
+ } else {
+ m.write(data)
+ }
+ case *js.LiteralExpr:
+ if expr.TokenType == js.DecimalToken {
+ m.write(decimalNumber(expr.Data, m.o.Precision))
+ } else if expr.TokenType == js.BinaryToken {
+ m.write(binaryNumber(expr.Data, m.o.Precision))
+ } else if expr.TokenType == js.OctalToken {
+ m.write(octalNumber(expr.Data, m.o.Precision))
+ } else if expr.TokenType == js.HexadecimalToken {
+ m.write(hexadecimalNumber(expr.Data, m.o.Precision))
+ } else if expr.TokenType == js.TrueToken {
+ if js.OpUnary < prec {
+ m.write(groupedNotZeroBytes)
+ } else {
+ m.write(notZeroBytes)
+ }
+ } else if expr.TokenType == js.FalseToken {
+ if js.OpUnary < prec {
+ m.write(groupedNotOneBytes)
+ } else {
+ m.write(notOneBytes)
+ }
+ } else if expr.TokenType == js.StringToken {
+ m.write(minifyString(expr.Data, true))
+ } else if expr.TokenType == js.RegExpToken {
+ // </script>/ => < /script>/
+ if 0 < len(m.prev) && m.prev[len(m.prev)-1] == '<' && bytes.HasPrefix(expr.Data, regExpScriptBytes) {
+ m.write(spaceBytes)
+ }
+ m.write(minifyRegExp(expr.Data))
+ } else {
+ m.write(expr.Data)
+ }
+ case *js.BinaryExpr:
+ mergeBinaryExpr(expr)
+ if expr.X == nil {
+ m.minifyExpr(expr.Y, prec)
+ break
+ }
+
+ precLeft := binaryLeftPrecMap[expr.Op]
+ // convert (a,b)&&c into a,b&&c but not a=(b,c)&&d into a=(b,c&&d)
+ if prec <= js.OpExpr {
+ if group, ok := expr.X.(*js.GroupExpr); ok {
+ if comma, ok := group.X.(*js.CommaExpr); ok && js.OpAnd <= exprPrec(comma.List[len(comma.List)-1]) {
+ expr.X = group.X
+ precLeft = js.OpExpr
+ }
+ }
+ }
+ if expr.Op == js.InstanceofToken || expr.Op == js.InToken {
+ group := expr.Op == js.InToken && m.inFor
+ if group {
+ m.write(openParenBytes)
+ }
+ m.minifyExpr(expr.X, precLeft)
+ m.writeSpaceAfterIdent()
+ m.write(expr.Op.Bytes())
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(expr.Y, binaryRightPrecMap[expr.Op])
+ if group {
+ m.write(closeParenBytes)
+ }
+ } else {
+ // TODO: has effect on GZIP?
+ //if expr.Op == js.EqEqToken || expr.Op == js.NotEqToken || expr.Op == js.EqEqEqToken || expr.Op == js.NotEqEqToken {
+ // // switch a==const for const==a, such as typeof a=="undefined" for "undefined"==typeof a (GZIP improvement)
+ // if _, ok := expr.Y.(*js.LiteralExpr); ok {
+ // expr.X, expr.Y = expr.Y, expr.X
+ // }
+ //}
+
+ if v, not, ok := isUndefinedOrNullVar(expr); ok {
+ // change a===null||a===undefined to a==null
+ op := js.EqEqToken
+ if not {
+ op = js.NotEqToken
+ }
+ expr = &js.BinaryExpr{op, v, &js.LiteralExpr{js.NullToken, nullBytes}}
+ }
+
+ m.minifyExpr(expr.X, precLeft)
+ if expr.Op == js.GtToken && m.prev[len(m.prev)-1] == '-' {
+ // 0 < len(m.prev) always
+ m.write(spaceBytes)
+ } else if expr.Op == js.EqEqEqToken || expr.Op == js.NotEqEqToken {
+ if left, ok := expr.X.(*js.UnaryExpr); ok && left.Op == js.TypeofToken {
+ if right, ok := expr.Y.(*js.LiteralExpr); ok && right.TokenType == js.StringToken {
+ if expr.Op == js.EqEqEqToken {
+ expr.Op = js.EqEqToken
+ } else {
+ expr.Op = js.NotEqToken
+ }
+ }
+ } else if right, ok := expr.Y.(*js.UnaryExpr); ok && right.Op == js.TypeofToken {
+ if left, ok := expr.X.(*js.LiteralExpr); ok && left.TokenType == js.StringToken {
+ if expr.Op == js.EqEqEqToken {
+ expr.Op = js.EqEqToken
+ } else {
+ expr.Op = js.NotEqToken
+ }
+ }
+ }
+ }
+ m.write(expr.Op.Bytes())
+ if expr.Op == js.AddToken {
+ // +++ => + ++
+ m.writeSpaceBefore('+')
+ } else if expr.Op == js.SubToken {
+ // --- => - --
+ m.writeSpaceBefore('-')
+ } else if expr.Op == js.DivToken {
+ // // => / /
+ m.writeSpaceBefore('/')
+ }
+ m.minifyExpr(expr.Y, binaryRightPrecMap[expr.Op])
+ }
+ case *js.UnaryExpr:
+ if expr.Op == js.PostIncrToken || expr.Op == js.PostDecrToken {
+ m.minifyExpr(expr.X, unaryPrecMap[expr.Op])
+ m.write(expr.Op.Bytes())
+ } else {
+ isLtNot := expr.Op == js.NotToken && 0 < len(m.prev) && m.prev[len(m.prev)-1] == '<'
+ m.write(expr.Op.Bytes())
+ if expr.Op == js.DeleteToken || expr.Op == js.VoidToken || expr.Op == js.TypeofToken || expr.Op == js.AwaitToken {
+ m.writeSpaceBeforeIdent()
+ } else if expr.Op == js.PosToken {
+ // +++ => + ++
+ m.writeSpaceBefore('+')
+ } else if expr.Op == js.NegToken || isLtNot {
+ // --- => - --
+ // <!-- => <! --
+ m.writeSpaceBefore('-')
+ } else if expr.Op == js.NotToken {
+ if lit, ok := expr.X.(*js.LiteralExpr); ok && (lit.TokenType == js.StringToken || lit.TokenType == js.RegExpToken) {
+ // !"string" => !1
+ m.write(oneBytes)
+ break
+ } else if ok && lit.TokenType == js.DecimalToken {
+ // !123 => !1 (except for !0)
+ if num := minify.Number(lit.Data, m.o.Precision); len(num) == 1 && num[0] == '0' {
+ m.write(zeroBytes)
+ } else {
+ m.write(oneBytes)
+ }
+ break
+ }
+ }
+ m.minifyExpr(expr.X, unaryPrecMap[expr.Op])
+ }
+ case *js.DotExpr:
+ if group, ok := expr.X.(*js.GroupExpr); ok {
+ if lit, ok := group.X.(*js.LiteralExpr); ok && lit.TokenType == js.DecimalToken {
+ num := minify.Number(lit.Data, m.o.Precision)
+ isInt := true
+ for _, c := range num {
+ if c == '.' || c == 'e' || c == 'E' {
+ isInt = false
+ break
+ }
+ }
+ if isInt {
+ m.write(num)
+ m.write(dotBytes)
+ } else {
+ m.write(num)
+ }
+ m.write(dotBytes)
+ m.write(expr.Y.Data)
+ break
+ }
+ }
+ if prec < js.OpMember {
+ m.minifyExpr(expr.X, js.OpCall)
+ } else {
+ m.minifyExpr(expr.X, js.OpMember)
+ }
+ if expr.Optional {
+ m.write(questionBytes)
+ } else if last := m.prev[len(m.prev)-1]; '0' <= last && last <= '9' {
+ // 0 < len(m.prev) always
+ isInteger := true
+ for _, c := range m.prev[:len(m.prev)-1] {
+ if c < '0' || '9' < c {
+ isInteger = false
+ break
+ }
+ }
+ if isInteger {
+ // prevent previous integer
+ m.write(dotBytes)
+ }
+ }
+ m.write(dotBytes)
+ m.write(expr.Y.Data)
+ case *js.GroupExpr:
+ if cond, ok := expr.X.(*js.CondExpr); ok {
+ expr.X = m.optimizeCondExpr(cond, js.OpExpr)
+ }
+ precInside := exprPrec(expr.X)
+ if prec <= precInside || precInside == js.OpCoalesce && prec == js.OpBitOr {
+ m.minifyExpr(expr.X, prec)
+ } else {
+ parentInFor := m.inFor
+ m.inFor = false
+ m.write(openParenBytes)
+ m.minifyExpr(expr.X, js.OpExpr)
+ m.write(closeParenBytes)
+ m.inFor = parentInFor
+ }
+ case *js.ArrayExpr:
+ parentInFor := m.inFor
+ m.inFor = false
+ m.write(openBracketBytes)
+ for i, item := range expr.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ if item.Spread {
+ m.write(ellipsisBytes)
+ }
+ m.minifyExpr(item.Value, js.OpAssign)
+ }
+ if 0 < len(expr.List) && expr.List[len(expr.List)-1].Value == nil {
+ m.write(commaBytes)
+ }
+ m.write(closeBracketBytes)
+ m.inFor = parentInFor
+ case *js.ObjectExpr:
+ parentInFor := m.inFor
+ m.inFor = false
+ groupedStmt := m.expectExpr != expectAny
+ if groupedStmt {
+ m.write(openParenBracketBytes)
+ } else {
+ m.write(openBraceBytes)
+ }
+ for i, item := range expr.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyProperty(item)
+ }
+ m.write(closeBraceBytes)
+ if groupedStmt {
+ m.groupedStmt = true
+ }
+ m.inFor = parentInFor
+ case *js.TemplateExpr:
+ if expr.Tag != nil {
+ if prec < js.OpMember {
+ m.minifyExpr(expr.Tag, js.OpCall)
+ } else {
+ m.minifyExpr(expr.Tag, js.OpMember)
+ }
+ if expr.Optional {
+ m.write(optChainBytes)
+ }
+ }
+ parentInFor := m.inFor
+ m.inFor = false
+ for _, item := range expr.List {
+ m.write(replaceEscapes(item.Value, '`', 1, 2))
+ m.minifyExpr(item.Expr, js.OpExpr)
+ }
+ m.write(replaceEscapes(expr.Tail, '`', 1, 1))
+ m.inFor = parentInFor
+ case *js.NewExpr:
+ if expr.Args == nil && js.OpLHS < prec && prec != js.OpNew {
+ m.write(openNewBytes)
+ m.writeSpaceBeforeIdent()
+ m.minifyExpr(expr.X, js.OpNew)
+ m.write(closeParenBytes)
+ } else {
+ m.write(newBytes)
+ m.writeSpaceBeforeIdent()
+ if expr.Args != nil {
+ m.minifyExpr(expr.X, js.OpMember)
+ m.minifyArguments(*expr.Args)
+ } else {
+ m.minifyExpr(expr.X, js.OpNew)
+ }
+ }
+ case *js.NewTargetExpr:
+ m.write(newTargetBytes)
+ m.writeSpaceBeforeIdent()
+ case *js.ImportMetaExpr:
+ if m.expectExpr == expectExprStmt {
+ m.write(openParenBytes)
+ m.groupedStmt = true
+ }
+ m.write(importMetaBytes)
+ m.writeSpaceBeforeIdent()
+ case *js.YieldExpr:
+ m.write(yieldBytes)
+ m.writeSpaceBeforeIdent()
+ if expr.X != nil {
+ if expr.Generator {
+ m.write(starBytes)
+ m.minifyExpr(expr.X, js.OpAssign)
+ } else if v, ok := expr.X.(*js.Var); !ok || !bytes.Equal(v.Name(), undefinedBytes) { // TODO: only if not defined
+ m.minifyExpr(expr.X, js.OpAssign)
+ }
+ }
+ case *js.CallExpr:
+ m.minifyExpr(expr.X, js.OpCall)
+ parentInFor := m.inFor
+ m.inFor = false
+ if expr.Optional {
+ m.write(optChainBytes)
+ }
+ m.minifyArguments(expr.Args)
+ m.inFor = parentInFor
+ case *js.IndexExpr:
+ if m.expectExpr == expectExprStmt {
+ if v, ok := expr.X.(*js.Var); ok && bytes.Equal(v.Name(), letBytes) {
+ m.write(notBytes)
+ }
+ }
+ if prec < js.OpMember {
+ m.minifyExpr(expr.X, js.OpCall)
+ } else {
+ m.minifyExpr(expr.X, js.OpMember)
+ }
+ if expr.Optional {
+ m.write(optChainBytes)
+ }
+ if lit, ok := expr.Y.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken && 2 < len(lit.Data) {
+ if isIdent := js.AsIdentifierName(lit.Data[1 : len(lit.Data)-1]); isIdent {
+ m.write(dotBytes)
+ m.write(lit.Data[1 : len(lit.Data)-1])
+ break
+ } else if isNum := js.AsDecimalLiteral(lit.Data[1 : len(lit.Data)-1]); isNum {
+ m.write(openBracketBytes)
+ m.write(minify.Number(lit.Data[1:len(lit.Data)-1], 0))
+ m.write(closeBracketBytes)
+ break
+ }
+ }
+ parentInFor := m.inFor
+ m.inFor = false
+ m.write(openBracketBytes)
+ m.minifyExpr(expr.Y, js.OpExpr)
+ m.write(closeBracketBytes)
+ m.inFor = parentInFor
+ case *js.CondExpr:
+ m.minifyExpr(expr.Cond, js.OpCoalesce)
+ m.write(questionBytes)
+ m.minifyExpr(expr.X, js.OpAssign)
+ m.write(colonBytes)
+ m.minifyExpr(expr.Y, js.OpAssign)
+ case *js.VarDecl:
+ m.minifyVarDecl(expr, true) // happens in for statement or when vars were hoisted
+ case *js.FuncDecl:
+ grouped := m.expectExpr == expectExprStmt && prec != js.OpExpr
+ if grouped {
+ m.write(openParenBytes)
+ } else if m.expectExpr == expectExprStmt {
+ m.write(notBytes)
+ }
+ parentInFor, parentGroupedStmt := m.inFor, m.groupedStmt
+ m.inFor, m.groupedStmt = false, false
+ m.minifyFuncDecl(expr, true)
+ m.inFor, m.groupedStmt = parentInFor, parentGroupedStmt
+ if grouped {
+ m.write(closeParenBytes)
+ }
+ case *js.ArrowFunc:
+ parentGroupedStmt := m.groupedStmt
+ m.groupedStmt = false
+ m.minifyArrowFunc(expr)
+ m.groupedStmt = parentGroupedStmt
+ case *js.MethodDecl:
+ parentGroupedStmt := m.groupedStmt
+ m.groupedStmt = false
+ m.minifyMethodDecl(expr) // only happens in object literal
+ m.groupedStmt = parentGroupedStmt
+ case *js.ClassDecl:
+ if m.expectExpr == expectExprStmt {
+ m.write(notBytes)
+ }
+ parentInFor, parentGroupedStmt := m.inFor, m.groupedStmt
+ m.inFor, m.groupedStmt = false, false
+ m.minifyClassDecl(expr)
+ m.inFor, m.groupedStmt = parentInFor, parentGroupedStmt
+ case *js.CommaExpr:
+ for i, item := range expr.List {
+ if i != 0 {
+ m.write(commaBytes)
+ }
+ m.minifyExpr(item, js.OpAssign)
+ }
+ }
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/js/stmtlist.go b/vendor/github.com/tdewolff/minify/v2/js/stmtlist.go
new file mode 100644
index 0000000..a1d3e2e
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/js/stmtlist.go
@@ -0,0 +1,341 @@
+package js
+
+import (
+ "github.com/tdewolff/parse/v2/js"
+)
+
+func optimizeStmt(i js.IStmt) js.IStmt {
+ // convert if/else into expression statement, and optimize blocks
+ if ifStmt, ok := i.(*js.IfStmt); ok {
+ hasIf := !isEmptyStmt(ifStmt.Body)
+ hasElse := !isEmptyStmt(ifStmt.Else)
+ if unaryExpr, ok := ifStmt.Cond.(*js.UnaryExpr); ok && unaryExpr.Op == js.NotToken && hasElse {
+ ifStmt.Cond = unaryExpr.X
+ ifStmt.Body, ifStmt.Else = ifStmt.Else, ifStmt.Body
+ hasIf, hasElse = hasElse, hasIf
+ }
+ if !hasIf && !hasElse {
+ return &js.ExprStmt{Value: ifStmt.Cond}
+ } else if hasIf && !hasElse {
+ ifStmt.Body = optimizeStmt(ifStmt.Body)
+ if X, isExprBody := ifStmt.Body.(*js.ExprStmt); isExprBody {
+ if unaryExpr, ok := ifStmt.Cond.(*js.UnaryExpr); ok && unaryExpr.Op == js.NotToken {
+ left := groupExpr(unaryExpr.X, binaryLeftPrecMap[js.OrToken])
+ right := groupExpr(X.Value, binaryRightPrecMap[js.OrToken])
+ return &js.ExprStmt{&js.BinaryExpr{js.OrToken, left, right}}
+ }
+ left := groupExpr(ifStmt.Cond, binaryLeftPrecMap[js.AndToken])
+ right := groupExpr(X.Value, binaryRightPrecMap[js.AndToken])
+ return &js.ExprStmt{&js.BinaryExpr{js.AndToken, left, right}}
+ } else if X, isIfStmt := ifStmt.Body.(*js.IfStmt); isIfStmt && isEmptyStmt(X.Else) {
+ left := groupExpr(ifStmt.Cond, binaryLeftPrecMap[js.AndToken])
+ right := groupExpr(X.Cond, binaryRightPrecMap[js.AndToken])
+ ifStmt.Cond = &js.BinaryExpr{js.AndToken, left, right}
+ ifStmt.Body = X.Body
+ return ifStmt
+ }
+ } else if !hasIf && hasElse {
+ ifStmt.Else = optimizeStmt(ifStmt.Else)
+ if X, isExprElse := ifStmt.Else.(*js.ExprStmt); isExprElse {
+ left := groupExpr(ifStmt.Cond, binaryLeftPrecMap[js.OrToken])
+ right := groupExpr(X.Value, binaryRightPrecMap[js.OrToken])
+ return &js.ExprStmt{&js.BinaryExpr{js.OrToken, left, right}}
+ }
+ } else if hasIf && hasElse {
+ ifStmt.Body = optimizeStmt(ifStmt.Body)
+ ifStmt.Else = optimizeStmt(ifStmt.Else)
+ XExpr, isExprBody := ifStmt.Body.(*js.ExprStmt)
+ YExpr, isExprElse := ifStmt.Else.(*js.ExprStmt)
+ if isExprBody && isExprElse {
+ return &js.ExprStmt{condExpr(ifStmt.Cond, XExpr.Value, YExpr.Value)}
+ }
+ XReturn, isReturnBody := ifStmt.Body.(*js.ReturnStmt)
+ YReturn, isReturnElse := ifStmt.Else.(*js.ReturnStmt)
+ if isReturnBody && isReturnElse {
+ if XReturn.Value == nil && YReturn.Value == nil {
+ return &js.ReturnStmt{commaExpr(ifStmt.Cond, &js.UnaryExpr{
+ Op: js.VoidToken,
+ X: &js.LiteralExpr{js.NumericToken, zeroBytes},
+ })}
+ } else if XReturn.Value != nil && YReturn.Value != nil {
+ return &js.ReturnStmt{condExpr(ifStmt.Cond, XReturn.Value, YReturn.Value)}
+ }
+ return ifStmt
+ }
+ XThrow, isThrowBody := ifStmt.Body.(*js.ThrowStmt)
+ YThrow, isThrowElse := ifStmt.Else.(*js.ThrowStmt)
+ if isThrowBody && isThrowElse {
+ return &js.ThrowStmt{condExpr(ifStmt.Cond, XThrow.Value, YThrow.Value)}
+ }
+ }
+ } else if decl, ok := i.(*js.VarDecl); ok {
+ // TODO: remove function name in var name=function name(){}
+ //for _, item := range decl.List {
+ // if v, ok := item.Binding.(*js.Var); ok && item.Default != nil {
+ // if fun, ok := item.Default.(*js.FuncDecl); ok && fun.Name != nil && bytes.Equal(v.Data, fun.Name.Data) {
+ // scope := fun.Body.Scope
+ // for i, vorig := range scope.Declared {
+ // if fun.Name == vorig {
+ // scope.Declared = append(scope.Declared[:i], scope.Declared[i+1:]...)
+ // }
+ // }
+ // scope.AddUndeclared(v)
+ // v.Uses += fun.Name.Uses - 1
+ // fun.Name.Link = v
+ // fun.Name = nil
+ // }
+ // }
+ //}
+
+ if decl.TokenType == js.ErrorToken {
+ // convert hoisted var declaration to expression or empty (if there are no defines) statement
+ for _, item := range decl.List {
+ if item.Default != nil {
+ return &js.ExprStmt{Value: decl}
+ }
+ }
+ return &js.EmptyStmt{}
+ }
+ // TODO: remove unused declarations
+ //for i := 0; i < len(decl.List); i++ {
+ // if v, ok := decl.List[i].Binding.(*js.Var); ok && v.Uses < 2 {
+ // decl.List = append(decl.List[:i], decl.List[i+1:]...)
+ // i--
+ // }
+ //}
+ //if len(decl.List) == 0 {
+ // return &js.EmptyStmt{}
+ //}
+ return decl
+ } else if blockStmt, ok := i.(*js.BlockStmt); ok {
+ // merge body and remove braces if it is not a lexical declaration
+ blockStmt.List = optimizeStmtList(blockStmt.List, defaultBlock)
+ if len(blockStmt.List) == 1 {
+ if _, ok := blockStmt.List[0].(*js.ClassDecl); ok {
+ return &js.EmptyStmt{}
+ } else if varDecl, ok := blockStmt.List[0].(*js.VarDecl); ok && varDecl.TokenType != js.VarToken {
+ // remove let or const declaration in otherwise empty scope, but keep assignments
+ exprs := []js.IExpr{}
+ for _, item := range varDecl.List {
+ if item.Default != nil && hasSideEffects(item.Default) {
+ exprs = append(exprs, item.Default)
+ }
+ }
+ if len(exprs) == 0 {
+ return &js.EmptyStmt{}
+ } else if len(exprs) == 1 {
+ return &js.ExprStmt{exprs[0]}
+ }
+ return &js.ExprStmt{&js.CommaExpr{exprs}}
+ }
+ return optimizeStmt(blockStmt.List[0])
+ } else if len(blockStmt.List) == 0 {
+ return &js.EmptyStmt{}
+ }
+ return blockStmt
+ }
+ return i
+}
+
+func optimizeStmtList(list []js.IStmt, blockType blockType) []js.IStmt {
+ // merge expression statements as well as if/else statements followed by flow control statements
+ if len(list) == 0 {
+ return list
+ }
+ j := 0 // write index
+ for i := 0; i < len(list); i++ { // read index
+ if ifStmt, ok := list[i].(*js.IfStmt); ok && !isEmptyStmt(ifStmt.Else) {
+ // if(!a)b;else c => if(a)c; else b
+ if unary, ok := ifStmt.Cond.(*js.UnaryExpr); ok && unary.Op == js.NotToken && isFlowStmt(lastStmt(ifStmt.Else)) {
+ ifStmt.Cond = unary.X
+ ifStmt.Body, ifStmt.Else = ifStmt.Else, ifStmt.Body
+ }
+ if isFlowStmt(lastStmt(ifStmt.Body)) {
+ // if body ends in flow statement (return, throw, break, continue), we can remove the else statement and put its body in the current scope
+ if blockStmt, ok := ifStmt.Else.(*js.BlockStmt); ok {
+ blockStmt.Scope.Unscope()
+ list = append(list[:i+1], append(blockStmt.List, list[i+1:]...)...)
+ } else {
+ list = append(list[:i+1], append([]js.IStmt{ifStmt.Else}, list[i+1:]...)...)
+ }
+ ifStmt.Else = nil
+ }
+ }
+
+ list[i] = optimizeStmt(list[i])
+
+ if _, ok := list[i].(*js.EmptyStmt); ok {
+ k := i + 1
+ for ; k < len(list); k++ {
+ if _, ok := list[k].(*js.EmptyStmt); !ok {
+ break
+ }
+ }
+ list = append(list[:i], list[k:]...)
+ i--
+ continue
+ }
+
+ if 0 < i {
+ // merge expression statements with expression, return, and throw statements
+ if left, ok := list[i-1].(*js.ExprStmt); ok {
+ if right, ok := list[i].(*js.ExprStmt); ok {
+ right.Value = commaExpr(left.Value, right.Value)
+ j--
+ } else if returnStmt, ok := list[i].(*js.ReturnStmt); ok && returnStmt.Value != nil {
+ returnStmt.Value = commaExpr(left.Value, returnStmt.Value)
+ j--
+ } else if throwStmt, ok := list[i].(*js.ThrowStmt); ok {
+ throwStmt.Value = commaExpr(left.Value, throwStmt.Value)
+ j--
+ } else if forStmt, ok := list[i].(*js.ForStmt); ok {
+ if varDecl, ok := forStmt.Init.(*js.VarDecl); ok && len(varDecl.List) == 0 || forStmt.Init == nil {
+ // TODO: only merge statements that don't have 'in' or 'of' keywords (slow to check?)
+ forStmt.Init = left.Value
+ j--
+ }
+ } else if whileStmt, ok := list[i].(*js.WhileStmt); ok {
+ // TODO: only merge statements that don't have 'in' or 'of' keywords (slow to check?)
+ var body *js.BlockStmt
+ if blockStmt, ok := whileStmt.Body.(*js.BlockStmt); ok {
+ body = blockStmt
+ } else {
+ body = &js.BlockStmt{}
+ body.List = []js.IStmt{whileStmt.Body}
+ }
+ list[i] = &js.ForStmt{Init: left.Value, Cond: whileStmt.Cond, Post: nil, Body: body}
+ j--
+ } else if switchStmt, ok := list[i].(*js.SwitchStmt); ok {
+ switchStmt.Init = commaExpr(left.Value, switchStmt.Init)
+ j--
+ } else if withStmt, ok := list[i].(*js.WithStmt); ok {
+ withStmt.Cond = commaExpr(left.Value, withStmt.Cond)
+ j--
+ } else if ifStmt, ok := list[i].(*js.IfStmt); ok {
+ ifStmt.Cond = commaExpr(left.Value, ifStmt.Cond)
+ j--
+ } else if varDecl, ok := list[i].(*js.VarDecl); ok && varDecl.TokenType == js.VarToken {
+ if merge := mergeVarDeclExprStmt(varDecl, left, true); merge {
+ j--
+ }
+ }
+ } else if left, ok := list[i-1].(*js.VarDecl); ok {
+ if right, ok := list[i].(*js.VarDecl); ok && left.TokenType == right.TokenType {
+ // merge const and let declarations, or non-hoisted var declarations
+ right.List = append(left.List, right.List...)
+ j--
+
+ // remove from vardecls list of scope
+ scope := left.Scope.Func
+ for i, decl := range scope.VarDecls {
+ if left == decl {
+ scope.VarDecls = append(scope.VarDecls[:i], scope.VarDecls[i+1:]...)
+ break
+ }
+ }
+ } else if left.TokenType == js.VarToken {
+ if exprStmt, ok := list[i].(*js.ExprStmt); ok {
+ // pull in assignments to variables into the declaration, e.g. var a;a=5 => var a=5
+ if merge := mergeVarDeclExprStmt(left, exprStmt, false); merge {
+ list[i] = list[i-1]
+ j--
+ }
+ } else if forStmt, ok := list[i].(*js.ForStmt); ok {
+ // TODO: only merge statements that don't have 'in' or 'of' keywords (slow to check?)
+ if forStmt.Init == nil {
+ forStmt.Init = left
+ j--
+ } else if decl, ok := forStmt.Init.(*js.VarDecl); ok && decl.TokenType == js.ErrorToken && !hasDefines(decl) {
+ forStmt.Init = left
+ j--
+ } else if ok && (decl.TokenType == js.VarToken || decl.TokenType == js.ErrorToken) {
+ // this is the second VarDecl, so we are hoisting var declarations, which means the forInit variables are already in 'left'
+ mergeVarDecls(left, decl, false)
+ decl.TokenType = js.VarToken
+ forStmt.Init = left
+ j--
+ }
+ } else if whileStmt, ok := list[i].(*js.WhileStmt); ok {
+ // TODO: only merge statements that don't have 'in' or 'of' keywords (slow to check?)
+ var body *js.BlockStmt
+ if blockStmt, ok := whileStmt.Body.(*js.BlockStmt); ok {
+ body = blockStmt
+ } else {
+ body = &js.BlockStmt{}
+ body.List = []js.IStmt{whileStmt.Body}
+ }
+ list[i] = &js.ForStmt{Init: left, Cond: whileStmt.Cond, Post: nil, Body: body}
+ j--
+ }
+ }
+ }
+ }
+ list[j] = list[i]
+
+ // merge if/else with return/throw when followed by return/throw
+ MergeIfReturnThrow:
+ if 0 < j {
+ // separate from expression merging in case of: if(a)return b;b=c;return d
+ if ifStmt, ok := list[j-1].(*js.IfStmt); ok && isEmptyStmt(ifStmt.Body) != isEmptyStmt(ifStmt.Else) {
+ // either the if body is empty or the else body is empty. In case where both bodies have return/throw, we already rewrote that if statement to an return/throw statement
+ if returnStmt, ok := list[j].(*js.ReturnStmt); ok {
+ if returnStmt.Value == nil {
+ if left, ok := ifStmt.Body.(*js.ReturnStmt); ok && left.Value == nil {
+ list[j-1] = &js.ExprStmt{Value: ifStmt.Cond}
+ } else if left, ok := ifStmt.Else.(*js.ReturnStmt); ok && left.Value == nil {
+ list[j-1] = &js.ExprStmt{Value: ifStmt.Cond}
+ }
+ } else {
+ if left, ok := ifStmt.Body.(*js.ReturnStmt); ok && left.Value != nil {
+ returnStmt.Value = condExpr(ifStmt.Cond, left.Value, returnStmt.Value)
+ list[j-1] = returnStmt
+ j--
+ goto MergeIfReturnThrow
+ } else if left, ok := ifStmt.Else.(*js.ReturnStmt); ok && left.Value != nil {
+ returnStmt.Value = condExpr(ifStmt.Cond, returnStmt.Value, left.Value)
+ list[j-1] = returnStmt
+ j--
+ goto MergeIfReturnThrow
+ }
+ }
+ } else if throwStmt, ok := list[j].(*js.ThrowStmt); ok {
+ if left, ok := ifStmt.Body.(*js.ThrowStmt); ok {
+ throwStmt.Value = condExpr(ifStmt.Cond, left.Value, throwStmt.Value)
+ list[j-1] = throwStmt
+ j--
+ goto MergeIfReturnThrow
+ } else if left, ok := ifStmt.Else.(*js.ThrowStmt); ok {
+ throwStmt.Value = condExpr(ifStmt.Cond, throwStmt.Value, left.Value)
+ list[j-1] = throwStmt
+ j--
+ goto MergeIfReturnThrow
+ }
+ }
+ }
+ }
+ j++
+ }
+
+ // remove superfluous return or continue
+ if 0 < j {
+ if blockType == functionBlock {
+ if returnStmt, ok := list[j-1].(*js.ReturnStmt); ok {
+ if returnStmt.Value == nil || isUndefined(returnStmt.Value) {
+ j--
+ } else if commaExpr, ok := returnStmt.Value.(*js.CommaExpr); ok && isUndefined(commaExpr.List[len(commaExpr.List)-1]) {
+ // rewrite function f(){return a,void 0} => function f(){a}
+ if len(commaExpr.List) == 2 {
+ list[j-1] = &js.ExprStmt{Value: commaExpr.List[0]}
+ } else {
+ commaExpr.List = commaExpr.List[:len(commaExpr.List)-1]
+ }
+ }
+ }
+ } else if blockType == iterationBlock {
+ if branchStmt, ok := list[j-1].(*js.BranchStmt); ok && branchStmt.Type == js.ContinueToken && branchStmt.Label == nil {
+ j--
+ }
+ }
+ }
+ return list[:j]
+}
diff --git a/vendor/github.com/tdewolff/minify/v2/js/util.go b/vendor/github.com/tdewolff/minify/v2/js/util.go
new file mode 100644
index 0000000..6883d93
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/js/util.go
@@ -0,0 +1,1361 @@
+package js
+
+import (
+ "bytes"
+ "encoding/hex"
+ stdStrconv "strconv"
+ "unicode/utf8"
+
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/parse/v2/js"
+ "github.com/tdewolff/parse/v2/strconv"
+)
+
// Pre-allocated byte slices for every punctuator, keyword, and canned token
// the JS writer emits, so writing them does not allocate per call.
var (
	spaceBytes                 = []byte(" ")
	newlineBytes               = []byte("\n")
	starBytes                  = []byte("*")
	colonBytes                 = []byte(":")
	semicolonBytes             = []byte(";")
	commaBytes                 = []byte(",")
	dotBytes                   = []byte(".")
	ellipsisBytes              = []byte("...")
	openBraceBytes             = []byte("{")
	closeBraceBytes            = []byte("}")
	openParenBytes             = []byte("(")
	closeParenBytes            = []byte(")")
	openBracketBytes           = []byte("[")
	closeBracketBytes          = []byte("]")
	openParenBracketBytes      = []byte("({")
	closeParenOpenBracketBytes = []byte("){")
	notBytes                   = []byte("!")
	questionBytes              = []byte("?")
	equalBytes                 = []byte("=")
	optChainBytes              = []byte("?.")
	arrowBytes                 = []byte("=>")
	zeroBytes                  = []byte("0")
	oneBytes                   = []byte("1")
	letBytes                   = []byte("let")
	getBytes                   = []byte("get")
	setBytes                   = []byte("set")
	asyncBytes                 = []byte("async")
	functionBytes              = []byte("function")
	staticBytes                = []byte("static")
	ifOpenBytes                = []byte("if(")
	elseBytes                  = []byte("else")
	withOpenBytes              = []byte("with(")
	doBytes                    = []byte("do")
	whileOpenBytes             = []byte("while(")
	forOpenBytes               = []byte("for(")
	forAwaitOpenBytes          = []byte("for await(")
	inBytes                    = []byte("in")
	ofBytes                    = []byte("of")
	switchOpenBytes            = []byte("switch(")
	throwBytes                 = []byte("throw")
	tryBytes                   = []byte("try")
	catchBytes                 = []byte("catch")
	finallyBytes               = []byte("finally")
	importBytes                = []byte("import")
	exportBytes                = []byte("export")
	fromBytes                  = []byte("from")
	returnBytes                = []byte("return")
	classBytes                 = []byte("class")
	asSpaceBytes               = []byte("as ")
	asyncSpaceBytes            = []byte("async ")
	spaceDefaultBytes          = []byte(" default")
	spaceExtendsBytes          = []byte(" extends")
	yieldBytes                 = []byte("yield")
	newBytes                   = []byte("new")
	openNewBytes               = []byte("(new")
	newTargetBytes             = []byte("new.target")
	importMetaBytes            = []byte("import.meta")
	nanBytes                   = []byte("NaN")
	undefinedBytes             = []byte("undefined")
	infinityBytes              = []byte("Infinity")
	nullBytes                  = []byte("null")
	voidZeroBytes              = []byte("void 0")  // shortest spelling of undefined
	groupedVoidZeroBytes       = []byte("(void 0)")
	oneDivZeroBytes            = []byte("1/0")     // shortest spelling of Infinity
	groupedOneDivZeroBytes     = []byte("(1/0)")
	notZeroBytes               = []byte("!0")      // shortest spelling of true
	groupedNotZeroBytes        = []byte("(!0)")
	notOneBytes                = []byte("!1")      // shortest spelling of false
	groupedNotOneBytes         = []byte("(!1)")
	debuggerBytes              = []byte("debugger")
	regExpScriptBytes          = []byte("/script>")
)
+
+func isEmptyStmt(stmt js.IStmt) bool {
+ if stmt == nil {
+ return true
+ } else if _, ok := stmt.(*js.EmptyStmt); ok {
+ return true
+ } else if decl, ok := stmt.(*js.VarDecl); ok && decl.TokenType == js.ErrorToken {
+ for _, item := range decl.List {
+ if item.Default != nil {
+ return false
+ }
+ }
+ return true
+ } else if block, ok := stmt.(*js.BlockStmt); ok {
+ for _, item := range block.List {
+ if ok := isEmptyStmt(item); !ok {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func isFlowStmt(stmt js.IStmt) bool {
+ if _, ok := stmt.(*js.ReturnStmt); ok {
+ return true
+ } else if _, ok := stmt.(*js.ThrowStmt); ok {
+ return true
+ } else if _, ok := stmt.(*js.BranchStmt); ok {
+ return true
+ }
+ return false
+}
+
+func lastStmt(stmt js.IStmt) js.IStmt {
+ if block, ok := stmt.(*js.BlockStmt); ok && 0 < len(block.List) {
+ return lastStmt(block.List[len(block.List)-1])
+ }
+ return stmt
+}
+
// endsInIf reports whether the last statement executed in istmt ends in an
// if statement without an else branch (after optimizeStmt has been applied).
// NOTE(review): presumably used by the printer to decide when an enclosing
// else would dangle and braces are required — confirm at the call site.
func endsInIf(istmt js.IStmt) bool {
	switch stmt := istmt.(type) {
	case *js.IfStmt:
		if stmt.Else == nil {
			// optimizeStmt may rewrite the if into a non-if statement
			_, ok := optimizeStmt(stmt).(*js.IfStmt)
			return ok
		}
		return endsInIf(stmt.Else)
	case *js.BlockStmt:
		if 0 < len(stmt.List) {
			return endsInIf(stmt.List[len(stmt.List)-1])
		}
	case *js.LabelledStmt:
		return endsInIf(stmt.Value)
	case *js.WithStmt:
		return endsInIf(stmt.Body)
	case *js.WhileStmt:
		return endsInIf(stmt.Body)
	case *js.ForStmt:
		return endsInIf(stmt.Body)
	case *js.ForInStmt:
		return endsInIf(stmt.Body)
	case *js.ForOfStmt:
		return endsInIf(stmt.Body)
	}
	return false
}
+
// precedence maps for the precedence inside the operation:
// unaryPrecMap, binaryLeftPrecMap, and binaryRightPrecMap give the minimum
// precedence required of an operand for it to appear unparenthesized in that
// position of the operator.
var unaryPrecMap = map[js.TokenType]js.OpPrec{
	js.PostIncrToken: js.OpLHS,
	js.PostDecrToken: js.OpLHS,
	js.PreIncrToken:  js.OpUnary,
	js.PreDecrToken:  js.OpUnary,
	js.NotToken:      js.OpUnary,
	js.BitNotToken:   js.OpUnary,
	js.TypeofToken:   js.OpUnary,
	js.VoidToken:     js.OpUnary,
	js.DeleteToken:   js.OpUnary,
	js.PosToken:      js.OpUnary,
	js.NegToken:      js.OpUnary,
	js.AwaitToken:    js.OpUnary,
}

// binaryLeftPrecMap gives the precedence required of the left operand.
var binaryLeftPrecMap = map[js.TokenType]js.OpPrec{
	js.EqToken:         js.OpLHS,
	js.MulEqToken:      js.OpLHS,
	js.DivEqToken:      js.OpLHS,
	js.ModEqToken:      js.OpLHS,
	js.ExpEqToken:      js.OpLHS,
	js.AddEqToken:      js.OpLHS,
	js.SubEqToken:      js.OpLHS,
	js.LtLtEqToken:     js.OpLHS,
	js.GtGtEqToken:     js.OpLHS,
	js.GtGtGtEqToken:   js.OpLHS,
	js.BitAndEqToken:   js.OpLHS,
	js.BitXorEqToken:   js.OpLHS,
	js.BitOrEqToken:    js.OpLHS,
	js.ExpToken:        js.OpUpdate,
	js.MulToken:        js.OpMul,
	js.DivToken:        js.OpMul,
	js.ModToken:        js.OpMul,
	js.AddToken:        js.OpAdd,
	js.SubToken:        js.OpAdd,
	js.LtLtToken:       js.OpShift,
	js.GtGtToken:       js.OpShift,
	js.GtGtGtToken:     js.OpShift,
	js.LtToken:         js.OpCompare,
	js.LtEqToken:       js.OpCompare,
	js.GtToken:         js.OpCompare,
	js.GtEqToken:       js.OpCompare,
	js.InToken:         js.OpCompare,
	js.InstanceofToken: js.OpCompare,
	js.EqEqToken:       js.OpEquals,
	js.NotEqToken:      js.OpEquals,
	js.EqEqEqToken:     js.OpEquals,
	js.NotEqEqToken:    js.OpEquals,
	js.BitAndToken:     js.OpBitAnd,
	js.BitXorToken:     js.OpBitXor,
	js.BitOrToken:      js.OpBitOr,
	js.AndToken:        js.OpAnd,
	js.OrToken:         js.OpOr,
	js.NullishToken:    js.OpBitOr, // or OpCoalesce
	js.CommaToken:      js.OpExpr,
}

// binaryRightPrecMap gives the precedence required of the right operand;
// it is one level stricter than the operator itself for left-associative
// operators.
var binaryRightPrecMap = map[js.TokenType]js.OpPrec{
	js.EqToken:         js.OpAssign,
	js.MulEqToken:      js.OpAssign,
	js.DivEqToken:      js.OpAssign,
	js.ModEqToken:      js.OpAssign,
	js.ExpEqToken:      js.OpAssign,
	js.AddEqToken:      js.OpAssign,
	js.SubEqToken:      js.OpAssign,
	js.LtLtEqToken:     js.OpAssign,
	js.GtGtEqToken:     js.OpAssign,
	js.GtGtGtEqToken:   js.OpAssign,
	js.BitAndEqToken:   js.OpAssign,
	js.BitXorEqToken:   js.OpAssign,
	js.BitOrEqToken:    js.OpAssign,
	js.ExpToken:        js.OpExp,
	js.MulToken:        js.OpExp,
	js.DivToken:        js.OpExp,
	js.ModToken:        js.OpExp,
	js.AddToken:        js.OpMul,
	js.SubToken:        js.OpMul,
	js.LtLtToken:       js.OpAdd,
	js.GtGtToken:       js.OpAdd,
	js.GtGtGtToken:     js.OpAdd,
	js.LtToken:         js.OpShift,
	js.LtEqToken:       js.OpShift,
	js.GtToken:         js.OpShift,
	js.GtEqToken:       js.OpShift,
	js.InToken:         js.OpShift,
	js.InstanceofToken: js.OpShift,
	js.EqEqToken:       js.OpCompare,
	js.NotEqToken:      js.OpCompare,
	js.EqEqEqToken:     js.OpCompare,
	js.NotEqEqToken:    js.OpCompare,
	js.BitAndToken:     js.OpEquals,
	js.BitXorToken:     js.OpBitAnd,
	js.BitOrToken:      js.OpBitXor,
	js.AndToken:        js.OpAnd, // changes order in AST but not in execution
	js.OrToken:         js.OpOr,  // changes order in AST but not in execution
	js.NullishToken:    js.OpBitOr, // or OpCoalesce
	js.CommaToken:      js.OpAssign,
}

// precedence maps of the operation itself: the precedence an expression with
// this operator has when viewed from the outside.
var unaryOpPrecMap = map[js.TokenType]js.OpPrec{
	js.PostIncrToken: js.OpUpdate,
	js.PostDecrToken: js.OpUpdate,
	js.PreIncrToken:  js.OpUpdate,
	js.PreDecrToken:  js.OpUpdate,
	js.NotToken:      js.OpUnary,
	js.BitNotToken:   js.OpUnary,
	js.TypeofToken:   js.OpUnary,
	js.VoidToken:     js.OpUnary,
	js.DeleteToken:   js.OpUnary,
	js.PosToken:      js.OpUnary,
	js.NegToken:      js.OpUnary,
	js.AwaitToken:    js.OpUnary,
}

var binaryOpPrecMap = map[js.TokenType]js.OpPrec{
	js.EqToken:         js.OpAssign,
	js.MulEqToken:      js.OpAssign,
	js.DivEqToken:      js.OpAssign,
	js.ModEqToken:      js.OpAssign,
	js.ExpEqToken:      js.OpAssign,
	js.AddEqToken:      js.OpAssign,
	js.SubEqToken:      js.OpAssign,
	js.LtLtEqToken:     js.OpAssign,
	js.GtGtEqToken:     js.OpAssign,
	js.GtGtGtEqToken:   js.OpAssign,
	js.BitAndEqToken:   js.OpAssign,
	js.BitXorEqToken:   js.OpAssign,
	js.BitOrEqToken:    js.OpAssign,
	js.ExpToken:        js.OpExp,
	js.MulToken:        js.OpMul,
	js.DivToken:        js.OpMul,
	js.ModToken:        js.OpMul,
	js.AddToken:        js.OpAdd,
	js.SubToken:        js.OpAdd,
	js.LtLtToken:       js.OpShift,
	js.GtGtToken:       js.OpShift,
	js.GtGtGtToken:     js.OpShift,
	js.LtToken:         js.OpCompare,
	js.LtEqToken:       js.OpCompare,
	js.GtToken:         js.OpCompare,
	js.GtEqToken:       js.OpCompare,
	js.InToken:         js.OpCompare,
	js.InstanceofToken: js.OpCompare,
	js.EqEqToken:       js.OpEquals,
	js.NotEqToken:      js.OpEquals,
	js.EqEqEqToken:     js.OpEquals,
	js.NotEqEqToken:    js.OpEquals,
	js.BitAndToken:     js.OpBitAnd,
	js.BitXorToken:     js.OpBitXor,
	js.BitOrToken:      js.OpBitOr,
	js.AndToken:        js.OpAnd,
	js.OrToken:         js.OpOr,
	js.NullishToken:    js.OpCoalesce,
	js.CommaToken:      js.OpExpr,
}
+
// exprPrec returns the operator precedence of the expression i itself, used
// to decide where parentheses are needed. Group expressions report the
// precedence of their inner expression.
func exprPrec(i js.IExpr) js.OpPrec {
	switch expr := i.(type) {
	case *js.Var, *js.LiteralExpr, *js.ArrayExpr, *js.ObjectExpr, *js.FuncDecl, *js.ClassDecl:
		return js.OpPrimary
	case *js.UnaryExpr:
		return unaryOpPrecMap[expr.Op]
	case *js.BinaryExpr:
		return binaryOpPrecMap[expr.Op]
	case *js.NewExpr:
		// `new X` without an argument list binds weaker than `new X()`
		if expr.Args == nil {
			return js.OpNew
		}
		return js.OpMember
	case *js.TemplateExpr:
		// untagged template literals are primary; tagged ones carry their
		// own precedence from parsing
		if expr.Tag == nil {
			return js.OpPrimary
		}
		return expr.Prec
	case *js.DotExpr:
		return expr.Prec
	case *js.IndexExpr:
		return expr.Prec
	case *js.NewTargetExpr, *js.ImportMetaExpr:
		return js.OpMember
	case *js.CallExpr:
		return js.OpCall
	case *js.CondExpr, *js.YieldExpr, *js.ArrowFunc:
		return js.OpAssign
	case *js.GroupExpr:
		return exprPrec(expr.X)
	}
	return js.OpExpr // CommaExpr
}
+
// hasSideEffects reports whether evaluating i could have observable side
// effects, so that it cannot be dropped or reordered. Anything not
// explicitly recognized is conservatively reported as having side effects.
func hasSideEffects(i js.IExpr) bool {
	// assume that variable usage and that the index operator themselves have no side effects
	switch expr := i.(type) {
	case *js.Var, *js.LiteralExpr, *js.FuncDecl, *js.ClassDecl, *js.ArrowFunc, *js.NewTargetExpr, *js.ImportMetaExpr:
		return false
	case *js.NewExpr, *js.CallExpr, *js.YieldExpr:
		// constructors, calls, and yields may run arbitrary code
		return true
	case *js.GroupExpr:
		return hasSideEffects(expr.X)
	case *js.DotExpr:
		return hasSideEffects(expr.X)
	case *js.IndexExpr:
		return hasSideEffects(expr.X) || hasSideEffects(expr.Y)
	case *js.CondExpr:
		return hasSideEffects(expr.Cond) || hasSideEffects(expr.X) || hasSideEffects(expr.Y)
	case *js.CommaExpr:
		for _, item := range expr.List {
			if hasSideEffects(item) {
				return true
			}
		}
		// NOTE(review): a comma expression whose items are all side-effect
		// free falls through to the final `return true` (conservative),
		// unlike ArrayExpr/ObjectExpr which return false — confirm intended.
	case *js.ArrayExpr:
		for _, item := range expr.List {
			if hasSideEffects(item.Value) {
				return true
			}
		}
		return false
	case *js.ObjectExpr:
		// values, default initializers, and computed property names may all
		// run code
		for _, item := range expr.List {
			if hasSideEffects(item.Value) || item.Init != nil && hasSideEffects(item.Init) || item.Name != nil && item.Name.IsComputed() && hasSideEffects(item.Name.Computed) {
				return true
			}
		}
		return false
	case *js.TemplateExpr:
		if hasSideEffects(expr.Tag) {
			return true
		}
		for _, item := range expr.List {
			if hasSideEffects(item.Expr) {
				return true
			}
		}
		return false
	case *js.UnaryExpr:
		// delete and increment/decrement mutate state
		if expr.Op == js.DeleteToken || expr.Op == js.PreIncrToken || expr.Op == js.PreDecrToken || expr.Op == js.PostIncrToken || expr.Op == js.PostDecrToken {
			return true
		}
		return hasSideEffects(expr.X)
	case *js.BinaryExpr:
		// only assignment operators mutate; other binary operators are pure
		return binaryOpPrecMap[expr.Op] == js.OpAssign
	}
	return true
}
+
+// TODO: use in more cases
+func groupExpr(i js.IExpr, prec js.OpPrec) js.IExpr {
+ precInside := exprPrec(i)
+ if _, ok := i.(*js.GroupExpr); !ok && precInside < prec && (precInside != js.OpCoalesce || prec != js.OpBitOr) {
+ return &js.GroupExpr{X: i}
+ }
+ return i
+}
+
+// TODO: use in more cases
+func condExpr(cond, x, y js.IExpr) js.IExpr {
+ if comma, ok := cond.(*js.CommaExpr); ok {
+ comma.List[len(comma.List)-1] = &js.CondExpr{
+ Cond: groupExpr(comma.List[len(comma.List)-1], js.OpCoalesce),
+ X: groupExpr(x, js.OpAssign),
+ Y: groupExpr(y, js.OpAssign),
+ }
+ return comma
+ }
+ return &js.CondExpr{
+ Cond: groupExpr(cond, js.OpCoalesce),
+ X: groupExpr(x, js.OpAssign),
+ Y: groupExpr(y, js.OpAssign),
+ }
+}
+
+func commaExpr(x, y js.IExpr) js.IExpr {
+ comma, ok := x.(*js.CommaExpr)
+ if !ok {
+ comma = &js.CommaExpr{List: []js.IExpr{x}}
+ }
+ if comma2, ok := y.(*js.CommaExpr); ok {
+ comma.List = append(comma.List, comma2.List...)
+ } else {
+ comma.List = append(comma.List, y)
+ }
+ return comma
+}
+
+func innerExpr(i js.IExpr) js.IExpr {
+ for {
+ if group, ok := i.(*js.GroupExpr); ok {
+ i = group.X
+ } else {
+ return i
+ }
+ }
+}
+
+func finalExpr(i js.IExpr) js.IExpr {
+ i = innerExpr(i)
+ if comma, ok := i.(*js.CommaExpr); ok {
+ i = comma.List[len(comma.List)-1]
+ }
+ if binary, ok := i.(*js.BinaryExpr); ok && binary.Op == js.EqToken {
+ i = binary.X // return first
+ }
+ return i
+}
+
+func isTrue(i js.IExpr) bool {
+ i = innerExpr(i)
+ if lit, ok := i.(*js.LiteralExpr); ok && lit.TokenType == js.TrueToken {
+ return true
+ } else if unary, ok := i.(*js.UnaryExpr); ok && unary.Op == js.NotToken {
+ ret, _ := isFalsy(unary.X)
+ return ret
+ }
+ return false
+}
+
+func isFalse(i js.IExpr) bool {
+ i = innerExpr(i)
+ if lit, ok := i.(*js.LiteralExpr); ok {
+ return lit.TokenType == js.FalseToken
+ } else if unary, ok := i.(*js.UnaryExpr); ok && unary.Op == js.NotToken {
+ ret, _ := isTruthy(unary.X)
+ return ret
+ }
+ return false
+}
+
+func isEqualExpr(a, b js.IExpr) bool {
+ a = innerExpr(a)
+ b = innerExpr(b)
+ if left, ok := a.(*js.Var); ok {
+ if right, ok := b.(*js.Var); ok {
+ return bytes.Equal(left.Name(), right.Name())
+ }
+ }
+ // TODO: use reflect.DeepEqual?
+ return false
+}
+
// toNullishExpr rewrites a conditional that tests a variable against
// undefined/null into a shorter ES2020 form when possible:
//
//	a==null ? b : a      =>  a ?? b
//	a==null ? undefined : a.b()  =>  a?.b()
//
// It returns the replacement expression and whether a rewrite applied.
func toNullishExpr(condExpr *js.CondExpr) (js.IExpr, bool) {
	if v, not, ok := isUndefinedOrNullVar(condExpr.Cond); ok {
		left, right := condExpr.X, condExpr.Y
		if not {
			// negated test (a!=null): the branches swap roles
			left, right = right, left
		}
		if isEqualExpr(v, right) {
			// convert conditional expression to nullish: a==null?b:a => a??b
			return &js.BinaryExpr{js.NullishToken, groupExpr(right, binaryLeftPrecMap[js.NullishToken]), groupExpr(left, binaryRightPrecMap[js.NullishToken])}, true
		} else if isUndefined(left) {
			// convert conditional expression to optional expr: a==null?undefined:a.b => a?.b
			// walk down the member/call/template chain of the false branch to
			// find its base expression and the access directly above it
			expr := right
			var parent js.IExpr
			for {
				prevExpr := expr
				if callExpr, ok := expr.(*js.CallExpr); ok {
					expr = callExpr.X
				} else if dotExpr, ok := expr.(*js.DotExpr); ok {
					expr = dotExpr.X
				} else if indexExpr, ok := expr.(*js.IndexExpr); ok {
					expr = indexExpr.X
				} else if templateExpr, ok := expr.(*js.TemplateExpr); ok {
					expr = templateExpr.Tag
				} else {
					break
				}
				parent = prevExpr
			}
			if parent != nil && isEqualExpr(v, expr) {
				// mark the innermost access on the tested variable optional (?.)
				if callExpr, ok := parent.(*js.CallExpr); ok {
					callExpr.Optional = true
				} else if dotExpr, ok := parent.(*js.DotExpr); ok {
					dotExpr.Optional = true
				} else if indexExpr, ok := parent.(*js.IndexExpr); ok {
					indexExpr.Optional = true
				} else if templateExpr, ok := parent.(*js.TemplateExpr); ok {
					templateExpr.Optional = true
				}
				return right, true
			}
		}
	}
	return nil, false
}
+
// isUndefinedOrNullVar matches conditions that test one variable against
// undefined/null: either strict comparisons against both joined by ||
// (or the negated forms joined by &&), or a single loose ==/!= comparison
// against either. It returns the tested variable, whether the test is
// negated (matches when the variable is NOT undefined/null), and whether a
// match was found.
func isUndefinedOrNullVar(i js.IExpr) (*js.Var, bool, bool) {
	i = innerExpr(i)
	if binary, ok := i.(*js.BinaryExpr); ok && (binary.Op == js.OrToken || binary.Op == js.AndToken) {
		// v==undefined||v===null, or with && the inequality variants
		eqEqOp := js.EqEqToken
		eqEqEqOp := js.EqEqEqToken
		if binary.Op == js.AndToken {
			eqEqOp = js.NotEqToken
			eqEqEqOp = js.NotEqEqToken
		}

		left, isBinaryX := innerExpr(binary.X).(*js.BinaryExpr)
		right, isBinaryY := innerExpr(binary.Y).(*js.BinaryExpr)
		if isBinaryX && isBinaryY && (left.Op == eqEqOp || left.Op == eqEqEqOp) && (right.Op == eqEqOp || right.Op == eqEqEqOp) {
			// the variable may be on either side of each comparison
			var leftVar, rightVar *js.Var
			if v, ok := left.X.(*js.Var); ok && isUndefinedOrNull(left.Y) {
				leftVar = v
			} else if v, ok := left.Y.(*js.Var); ok && isUndefinedOrNull(left.X) {
				leftVar = v
			}
			if v, ok := right.X.(*js.Var); ok && isUndefinedOrNull(right.Y) {
				rightVar = v
			} else if v, ok := right.Y.(*js.Var); ok && isUndefinedOrNull(right.X) {
				rightVar = v
			}
			// both comparisons must refer to the same variable binding
			if leftVar != nil && leftVar == rightVar {
				return leftVar, binary.Op == js.AndToken, true
			}
		}
	} else if ok && (binary.Op == js.EqEqToken || binary.Op == js.NotEqToken) {
		// loose comparison: v==null also matches undefined (and vice versa)
		var variable *js.Var
		if v, ok := binary.X.(*js.Var); ok && isUndefinedOrNull(binary.Y) {
			variable = v
		} else if v, ok := binary.Y.(*js.Var); ok && isUndefinedOrNull(binary.X) {
			variable = v
		}
		if variable != nil {
			return variable, binary.Op == js.NotEqToken, true
		}
	}
	return nil, false, false
}
+
+func isUndefinedOrNull(i js.IExpr) bool {
+ i = innerExpr(i)
+ if lit, ok := i.(*js.LiteralExpr); ok {
+ return lit.TokenType == js.NullToken
+ }
+ return isUndefined(i)
+}
+
+func isUndefined(i js.IExpr) bool {
+ i = innerExpr(i)
+ if v, ok := i.(*js.Var); ok {
+ if bytes.Equal(v.Name(), undefinedBytes) { // TODO: only if not defined
+ return true
+ }
+ } else if unary, ok := i.(*js.UnaryExpr); ok && unary.Op == js.VoidToken {
+ return !hasSideEffects(unary.X)
+ }
+ return false
+}
+
+// returns whether truthy and whether it could be coerced to a boolean (i.e. when returns (false,true) this means it is falsy)
+func isTruthy(i js.IExpr) (bool, bool) {
+ if falsy, ok := isFalsy(i); ok {
+ return !falsy, true
+ }
+ return false, false
+}
+
// returns whether falsy and whether it could be coerced to a boolean (i.e. when returns (false,true) this means it is truthy)
// Groups and !-negations are unwrapped first while tracking negation parity.
func isFalsy(i js.IExpr) (bool, bool) {
	negated := false
	group, isGroup := i.(*js.GroupExpr)
	unary, isUnary := i.(*js.UnaryExpr)
	for isGroup || isUnary && unary.Op == js.NotToken {
		if isGroup {
			i = group.X
		} else {
			i = unary.X
			negated = !negated // each ! flips the answer
		}
		group, isGroup = i.(*js.GroupExpr)
		unary, isUnary = i.(*js.UnaryExpr)
	}
	if lit, ok := i.(*js.LiteralExpr); ok {
		tt := lit.TokenType
		d := lit.Data
		if tt == js.FalseToken || tt == js.NullToken || tt == js.StringToken && len(lit.Data) == 0 {
			return !negated, true // falsy
		} else if tt == js.TrueToken || tt == js.StringToken {
			return negated, true // truthy
		} else if tt == js.DecimalToken || tt == js.BinaryToken || tt == js.OctalToken || tt == js.HexadecimalToken || tt == js.BigIntToken {
			// a numeric literal is falsy iff it is zero: any digit other than
			// 0 (ignoring '.', radix prefixes x/b/o) before an exponent or
			// BigInt suffix makes it truthy
			for _, c := range d {
				if c == 'e' || c == 'E' || c == 'n' {
					break
				} else if c != '0' && c != '.' && c != 'x' && c != 'X' && c != 'b' && c != 'B' && c != 'o' && c != 'O' {
					return negated, true // truthy
				}
			}
			return !negated, true // falsy
		}
	} else if isUndefined(i) {
		return !negated, true // falsy
	} else if v, ok := i.(*js.Var); ok && bytes.Equal(v.Name(), nanBytes) {
		return !negated, true // falsy
	}
	return false, false // unknown
}
+
+func isBooleanExpr(expr js.IExpr) bool {
+ if unaryExpr, ok := expr.(*js.UnaryExpr); ok {
+ return unaryExpr.Op == js.NotToken
+ } else if binaryExpr, ok := expr.(*js.BinaryExpr); ok {
+ op := binaryOpPrecMap[binaryExpr.Op]
+ if op == js.OpAnd || op == js.OpOr {
+ return isBooleanExpr(binaryExpr.X) && isBooleanExpr(binaryExpr.Y)
+ }
+ return op == js.OpCompare || op == js.OpEquals
+ } else if litExpr, ok := expr.(*js.LiteralExpr); ok {
+ return litExpr.TokenType == js.TrueToken || litExpr.TokenType == js.FalseToken
+ } else if groupExpr, ok := expr.(*js.GroupExpr); ok {
+ return isBooleanExpr(groupExpr.X)
+ }
+ return false
+}
+
+func invertBooleanOp(op js.TokenType) js.TokenType {
+ if op == js.EqEqToken {
+ return js.NotEqToken
+ } else if op == js.NotEqToken {
+ return js.EqEqToken
+ } else if op == js.EqEqEqToken {
+ return js.NotEqEqToken
+ } else if op == js.NotEqEqToken {
+ return js.EqEqEqToken
+ }
+ return js.ErrorToken
+}
+
// optimizeBooleanExpr returns expr coerced to a boolean in the shortest
// form; when invert is set the result is the logical negation of expr.
// Non-boolean expressions are double-negated (!!expr) to force coercion.
// prec is the precedence required at this position in the output.
func optimizeBooleanExpr(expr js.IExpr, invert bool, prec js.OpPrec) js.IExpr {
	if invert {
		// unary !(boolean) has already been handled
		if binaryExpr, ok := expr.(*js.BinaryExpr); ok && binaryOpPrecMap[binaryExpr.Op] == js.OpEquals {
			// a==b => a!=b instead of !(a==b)
			binaryExpr.Op = invertBooleanOp(binaryExpr.Op)
			return expr
		} else {
			return optimizeUnaryExpr(&js.UnaryExpr{js.NotToken, groupExpr(expr, js.OpUnary)}, prec)
		}
	} else if isBooleanExpr(expr) {
		return groupExpr(expr, prec)
	} else {
		return &js.UnaryExpr{js.NotToken, &js.UnaryExpr{js.NotToken, groupExpr(expr, js.OpUnary)}}
	}
}
+
// optimizeUnaryExpr simplifies a !-expression: it drops even chains of !
// over boolean expressions, inverts equality operators (!(a==b) => a!=b),
// and applies De Morgan's laws to &&/|| operands when the byte-savings
// score computed below is positive. prec is the precedence required at this
// position in the output.
func optimizeUnaryExpr(expr *js.UnaryExpr, prec js.OpPrec) js.IExpr {
	if expr.Op == js.NotToken {
		// unwrap nested groups and !-chains, tracking negation parity
		invert := true
		var expr2 js.IExpr = expr.X
		for {
			if unary, ok := expr2.(*js.UnaryExpr); ok && unary.Op == js.NotToken {
				invert = !invert
				expr2 = unary.X
			} else if group, ok := expr2.(*js.GroupExpr); ok {
				expr2 = group.X
			} else {
				break
			}
		}
		if !invert && isBooleanExpr(expr2) {
			// an even number of ! over a boolean: drop them all
			return groupExpr(expr2, prec)
		} else if binary, ok := expr2.(*js.BinaryExpr); ok && invert {
			if binaryOpPrecMap[binary.Op] == js.OpEquals {
				// !(a==b) => a!=b
				binary.Op = invertBooleanOp(binary.Op)
				return groupExpr(binary, prec)
			} else if binary.Op == js.AndToken || binary.Op == js.OrToken {
				// De Morgan: flip the operator, negate both operands
				op := js.AndToken
				if binary.Op == js.AndToken {
					op = js.OrToken
				}
				precInside := binaryOpPrecMap[op]
				needsGroup := precInside < prec && (precInside != js.OpCoalesce || prec != js.OpBitOr)

				// rewrite !(a||b) to !a&&!b
				// rewrite !(a==0||b==0) to a!=0&&b!=0
				score := 3 // savings if rewritten (group parentheses and not-token)
				if needsGroup {
					score -= 2
				}
				score -= 2 // add two not-tokens for left and right

				// == and === can become != and !==
				var isEqX, isEqY bool
				if binaryExpr, ok := binary.X.(*js.BinaryExpr); ok && binaryOpPrecMap[binaryExpr.Op] == js.OpEquals {
					score += 1
					isEqX = true
				}
				if binaryExpr, ok := binary.Y.(*js.BinaryExpr); ok && binaryOpPrecMap[binaryExpr.Op] == js.OpEquals {
					score += 1
					isEqY = true
				}

				// add group if it wasn't already there
				var needsGroupX, needsGroupY bool
				if !isEqX && binaryLeftPrecMap[binary.Op] <= exprPrec(binary.X) && exprPrec(binary.X) < js.OpUnary {
					score -= 2
					needsGroupX = true
				}
				if !isEqY && binaryRightPrecMap[binary.Op] <= exprPrec(binary.Y) && exprPrec(binary.Y) < js.OpUnary {
					score -= 2
					needsGroupY = true
				}

				// remove group
				if op == js.OrToken {
					if exprPrec(binary.X) == js.OpOr {
						score += 2
					}
					if exprPrec(binary.Y) == js.OpAnd {
						score += 2
					}
				}

				if 0 < score {
					// net win: apply the rewrite in place
					binary.Op = op
					if isEqX {
						binary.X.(*js.BinaryExpr).Op = invertBooleanOp(binary.X.(*js.BinaryExpr).Op)
					}
					if isEqY {
						binary.Y.(*js.BinaryExpr).Op = invertBooleanOp(binary.Y.(*js.BinaryExpr).Op)
					}
					if needsGroupX {
						binary.X = &js.GroupExpr{binary.X}
					}
					if needsGroupY {
						binary.Y = &js.GroupExpr{binary.Y}
					}
					if !isEqX {
						binary.X = &js.UnaryExpr{js.NotToken, binary.X}
					}
					if !isEqY {
						binary.Y = &js.UnaryExpr{js.NotToken, binary.Y}
					}
					if needsGroup {
						return &js.GroupExpr{binary}
					}
					return binary
				}
			}
		}
	}
	return expr
}
+
// optimizeCondExpr shortens a conditional expression cond?x:y by a cascade
// of rewrites: dropping redundant negations in the condition, folding
// statically truthy/falsy conditions, a?a:b => a||b and a?b:a => a&&b,
// nullish/optional-chaining forms (ES2020+), merging identical or boolean
// branches, factoring common call targets, and flattening comma conditions.
// prec is the precedence required at this position in the output.
func (m *jsMinifier) optimizeCondExpr(expr *js.CondExpr, prec js.OpPrec) js.IExpr {
	// remove double negative !! in condition, or switch cases for single negative !
	if unary1, ok := expr.Cond.(*js.UnaryExpr); ok && unary1.Op == js.NotToken {
		if unary2, ok := unary1.X.(*js.UnaryExpr); ok && unary2.Op == js.NotToken {
			if isBooleanExpr(unary2.X) {
				expr.Cond = unary2.X
			}
		} else {
			// !a?b:c => a?c:b
			expr.Cond = unary1.X
			expr.X, expr.Y = expr.Y, expr.X
		}
	}

	finalCond := finalExpr(expr.Cond)
	if truthy, ok := isTruthy(expr.Cond); truthy && ok {
		// if condition is truthy
		return expr.X
	} else if !truthy && ok {
		// if condition is falsy
		return expr.Y
	} else if isEqualExpr(finalCond, expr.X) && (exprPrec(finalCond) < js.OpAssign || binaryLeftPrecMap[js.OrToken] <= exprPrec(finalCond)) && (exprPrec(expr.Y) < js.OpAssign || binaryRightPrecMap[js.OrToken] <= exprPrec(expr.Y)) {
		// if condition is equal to true body
		// for higher prec we need to add group parenthesis, and for lower prec we have parenthesis anyways. This only is shorter if len(expr.X) >= 3. isEqualExpr only checks for literal variables, which is a name will be minified to a one or two character name.
		return &js.BinaryExpr{js.OrToken, groupExpr(expr.Cond, binaryLeftPrecMap[js.OrToken]), expr.Y}
	} else if isEqualExpr(finalCond, expr.Y) && (exprPrec(finalCond) < js.OpAssign || binaryLeftPrecMap[js.AndToken] <= exprPrec(finalCond)) && (exprPrec(expr.X) < js.OpAssign || binaryRightPrecMap[js.AndToken] <= exprPrec(expr.X)) {
		// if condition is equal to false body
		// for higher prec we need to add group parenthesis, and for lower prec we have parenthesis anyways. This only is shorter if len(expr.X) >= 3. isEqualExpr only checks for literal variables, which is a name will be minified to a one or two character name.
		return &js.BinaryExpr{js.AndToken, groupExpr(expr.Cond, binaryLeftPrecMap[js.AndToken]), expr.X}
	} else if isEqualExpr(expr.X, expr.Y) {
		// if true and false bodies are equal
		return groupExpr(&js.CommaExpr{[]js.IExpr{expr.Cond, expr.X}}, prec)
	} else if nullishExpr, ok := toNullishExpr(expr); ok && m.o.minVersion(2020) {
		// no need to check whether left/right need to add groups, as the space saving is always more
		return nullishExpr
	} else {
		// if the branches call the same target with one argument each, hoist
		// the call: c?f(a):f(b) => f(c?a:b)
		callX, isCallX := expr.X.(*js.CallExpr)
		callY, isCallY := expr.Y.(*js.CallExpr)
		if isCallX && isCallY && len(callX.Args.List) == 1 && len(callY.Args.List) == 1 && !callX.Args.List[0].Rest && !callY.Args.List[0].Rest && isEqualExpr(callX.X, callY.X) {
			expr.X = callX.Args.List[0].Value
			expr.Y = callY.Args.List[0].Value
			return &js.CallExpr{callX.X, js.Args{[]js.Arg{{expr, false}}}, false} // recompress the conditional expression inside
		}

		// shorten when true and false bodies are true and false
		trueX, falseX := isTrue(expr.X), isFalse(expr.X)
		trueY, falseY := isTrue(expr.Y), isFalse(expr.Y)
		if trueX && falseY || falseX && trueY {
			return optimizeBooleanExpr(expr.Cond, falseX, prec)
		} else if trueX || trueY {
			// trueX != trueY
			cond := optimizeBooleanExpr(expr.Cond, trueY, binaryLeftPrecMap[js.OrToken])
			if trueY {
				return &js.BinaryExpr{js.OrToken, cond, groupExpr(expr.X, binaryRightPrecMap[js.OrToken])}
			} else {
				return &js.BinaryExpr{js.OrToken, cond, groupExpr(expr.Y, binaryRightPrecMap[js.OrToken])}
			}
		} else if falseX || falseY {
			// falseX != falseY
			cond := optimizeBooleanExpr(expr.Cond, falseX, binaryLeftPrecMap[js.AndToken])
			if falseX {
				return &js.BinaryExpr{js.AndToken, cond, groupExpr(expr.Y, binaryRightPrecMap[js.AndToken])}
			} else {
				return &js.BinaryExpr{js.AndToken, cond, groupExpr(expr.X, binaryRightPrecMap[js.AndToken])}
			}
		} else if condExpr, ok := expr.X.(*js.CondExpr); ok && isEqualExpr(expr.Y, condExpr.Y) {
			// nested conditional expression with same false bodies
			return &js.CondExpr{&js.BinaryExpr{js.AndToken, groupExpr(expr.Cond, binaryLeftPrecMap[js.AndToken]), groupExpr(condExpr.Cond, binaryRightPrecMap[js.AndToken])}, condExpr.X, expr.Y}
		} else if prec <= js.OpExpr {
			// regular conditional expression
			// convert (a,b)?c:d => a,b?c:d
			if group, ok := expr.Cond.(*js.GroupExpr); ok {
				if comma, ok := group.X.(*js.CommaExpr); ok && js.OpCoalesce <= exprPrec(comma.List[len(comma.List)-1]) {
					expr.Cond = comma.List[len(comma.List)-1]
					comma.List[len(comma.List)-1] = expr
					return comma // recompress the conditional expression inside
				}
			}
		}
	}
	return expr
}
+
// isHexDigit reports whether b is an ASCII hexadecimal digit (0-9, a-f, A-F).
func isHexDigit(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case 'a' <= b && b <= 'f':
		return true
	case 'A' <= b && b <= 'F':
		return true
	}
	return false
}
+
// mergeBinaryExpr merges adjacent string literals in a left-leaning addition
// chain in place, e.g. "a"+x+"b"+"c" => "a"+x+"bc". Quotes that become
// unescaped by the merge are repaired later by minifyString.
func mergeBinaryExpr(expr *js.BinaryExpr) {
	// merge string concatenations which may be intertwined with other additions
	var ok bool
	for expr.Op == js.AddToken {
		if lit, ok := expr.Y.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
			left := expr
			strings := []*js.LiteralExpr{lit} // collected right-to-left
			n := len(lit.Data) - 2            // total content length without quotes
			for left.Op == js.AddToken {
				if 50 < len(strings) {
					return // limit recursion
				}
				if lit, ok := left.X.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
					// chain starts with a string literal; it is consumed here
					// and expr.X is re-pointed below
					strings = append(strings, lit)
					n += len(lit.Data) - 2
					left.X = nil
				} else if newLeft, ok := left.X.(*js.BinaryExpr); ok {
					if lit, ok := newLeft.Y.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
						strings = append(strings, lit)
						n += len(lit.Data) - 2
						left = newLeft
						continue
					}
				}
				break
			}

			if 1 < len(strings) {
				// unescaped quotes will be repaired in minifyString later on
				// concatenate contents left-to-right, reusing the outermost quotes
				b := make([]byte, 0, n+2)
				b = append(b, strings[len(strings)-1].Data[:len(strings[len(strings)-1].Data)-1]...)
				for i := len(strings) - 2; 0 < i; i-- {
					b = append(b, strings[i].Data[1:len(strings[i].Data)-1]...)
				}
				b = append(b, strings[0].Data[1:]...)
				b[len(b)-1] = b[0] // close with the same quote it opens with

				expr.X = left.X
				expr.Y.(*js.LiteralExpr).Data = b
			}
		}
		// continue down the left spine of the addition chain
		if expr, ok = expr.X.(*js.BinaryExpr); !ok {
			break
		}
	}
}
+
// minifyString rewrites a quoted string literal (b includes the surrounding
// quotes) to its shortest quoting: it counts quote characters, escaped
// newlines, and template-sensitive bytes, then picks ", ', or — when
// allowTemplate is set — a backtick template, and finally strips escapes
// made unnecessary by the chosen quote.
func minifyString(b []byte, allowTemplate bool) []byte {
	if len(b) < 3 {
		// nothing between the quotes; normalize to ""
		return []byte("\"\"")
	}

	// switch quotes if more optimal
	singleQuotes := 0
	doubleQuotes := 0
	backtickQuotes := 0
	newlines := 0
	dollarSigns := 0
	notEscapes := false
	for i := 1; i < len(b)-1; i++ {
		if b[i] == '\'' {
			singleQuotes++
		} else if b[i] == '"' {
			doubleQuotes++
		} else if b[i] == '`' {
			backtickQuotes++
		} else if b[i] == '$' {
			dollarSigns++
		} else if b[i] == '\\' && i+1 < len(b) {
			if b[i+1] == 'n' || b[i+1] == 'r' {
				newlines++
			} else if '1' <= b[i+1] && b[i+1] <= '9' || b[i+1] == '0' && i+2 < len(b) && '0' <= b[i+2] && b[i+2] <= '9' {
				// legacy octal escapes are not allowed in template literals
				notEscapes = true
			}
		}
	}
	quote := byte('"') // default to " for better GZIP compression
	quotes := singleQuotes
	if doubleQuotes < singleQuotes {
		quote = byte('"')
		quotes = doubleQuotes
	} else if singleQuotes < doubleQuotes {
		quote = byte('\'')
	}
	if allowTemplate && !notEscapes && backtickQuotes+dollarSigns < quotes+newlines {
		// a template saves more: newlines need no escape and fewer
		// backticks/dollar signs need one
		quote = byte('`')
	}
	b[0] = quote
	b[len(b)-1] = quote

	// strip unnecessary escapes
	return replaceEscapes(b, quote, 1, 1)
}
+
// replaceEscapes rewrites the escape sequences in b (largely in place) for
// the given quote character: it strips backslashes that the new quote makes
// unnecessary, decodes hex/unicode/octal escapes to literal bytes where
// shorter, and adds escapes the new quote requires (the quote itself, ${ in
// templates, and </script> so output can be embedded in HTML). prefix and
// suffix give the number of delimiter bytes to leave untouched at each end.
func replaceEscapes(b []byte, quote byte, prefix, suffix int) []byte {
	// strip unnecessary escapes
	// j is the write position, start the beginning of the pending unwritten
	// segment; when start stays 0 nothing was removed and b is returned as-is
	j := 0
	start := 0
	for i := prefix; i < len(b)-suffix; i++ {
		if c := b[i]; c == '\\' {
			c = b[i+1]
			if c == quote || c == '\\' || quote != '`' && (c == 'n' || c == 'r') || c == '0' && (i+2 == len(b)-1 || b[i+2] < '0' || '7' < b[i+2]) {
				// keep escape sequence
				i++
				continue
			}
			n := 1 // number of characters to skip
			if c == '\n' || c == '\r' || c == 0xE2 && i+3 < len(b)-1 && b[i+2] == 0x80 && (b[i+3] == 0xA8 || b[i+3] == 0xA9) {
				// line continuations (escaped newline, CRLF, U+2028/U+2029)
				if c == 0xE2 {
					n = 4
				} else if c == '\r' && i+2 < len(b)-1 && b[i+2] == '\n' {
					n = 3
				} else {
					n = 2
				}
			} else if c == 'x' {
				// NOTE(review): the final disjunct `b[i+3] < '0' && '7' < b[i+3]`
				// is always false (a byte cannot be both below '0' and above
				// '7'); `||` was presumably intended — verify against upstream
				// tdewolff/minify.
				if i+3 < len(b)-1 && isHexDigit(b[i+2]) && b[i+2] < '8' && isHexDigit(b[i+3]) && (!(b[i+2] == '0' && b[i+3] == '0') || i+3 == len(b) || b[i+3] != '\\' && (b[i+3] < '0' && '7' < b[i+3])) {
					// don't convert \x00 to \0 if it may be an octal number
					// hexadecimal escapes
					_, _ = hex.Decode(b[i+3:i+4:i+4], b[i+2:i+4])
					n = 3
					if b[i+3] == '\\' || b[i+3] == quote || b[i+3] == '\n' || b[i+3] == '\r' || b[i+3] == 0 {
						// decoded byte still needs escaping under the new quote
						if b[i+3] == '\n' {
							b[i+3] = 'n'
						} else if b[i+3] == '\r' {
							b[i+3] = 'r'
						}
						n--
						b[i+2] = '\\'
					}
				} else {
					i++
					continue
				}
			} else if c == 'u' && i+2 < len(b) {
				// unicode escapes \uXXXX and \u{X...}
				l := i + 2
				if b[i+2] == '{' {
					l++
				}
				r := l
				for ; r < len(b) && (b[i+2] == '{' || r < l+4); r++ {
					if b[r] < '0' || '9' < b[r] && b[r] < 'A' || 'F' < b[r] && b[r] < 'a' || 'f' < b[r] {
						break
					}
				}
				if b[i+2] == '{' && 6 < r-l || b[i+2] != '{' && r-l != 4 {
					// malformed escape; leave untouched
					i++
					continue
				}
				num, err := stdStrconv.ParseInt(string(b[l:r]), 16, 32)
				if err != nil || 0x10FFFF <= num {
					i++
					continue
				}

				if num == 0 {
					// don't convert NULL to literal NULL (gives JS parsing problems)
					// NOTE(review): `b[r] < '0' && '7' < b[r]` is always false;
					// presumably `||` was intended — verify against upstream.
					if r == len(b) || b[r] != '\\' && (b[r] < '0' && '7' < b[r]) {
						b[r-2] = '\\'
						n = r - l
					} else {
						// don't convert NULL to \0 (may be an octal number)
						b[r-4] = '\\'
						b[r-3] = 'x'
						n = r - l - 2
					}
				} else {
					// decode unicode character to UTF-8 and put at the end of the escape sequence
					// then skip the first part of the escape sequence until the decoded character
					n = 2 + r - l
					if b[i+2] == '{' {
						n += 2
					}
					m := utf8.RuneLen(rune(num))
					if m == -1 {
						i++
						continue
					}
					utf8.EncodeRune(b[i+n-m:], rune(num))
					n -= m
				}
			} else if '0' <= c && c <= '7' {
				// octal escapes (legacy), \0 already handled
				num := c - '0'
				if i+2 < len(b)-1 && '0' <= b[i+2] && b[i+2] <= '7' {
					num = num*8 + b[i+2] - '0'
					n++
					if num < 32 && i+3 < len(b)-1 && '0' <= b[i+3] && b[i+3] <= '7' {
						num = num*8 + b[i+3] - '0'
						n++
					}
				}
				b[i+n] = num
				if num == 0 || num == '\\' || num == quote || num == '\n' || num == '\r' {
					// decoded byte must stay escaped
					if num == 0 {
						b[i+n] = '0'
					} else if num == '\n' {
						b[i+n] = 'n'
					} else if num == '\r' {
						b[i+n] = 'r'
					}
					n--
					b[i+n] = '\\'
				}
			} else if c == 'n' {
				b[i+1] = '\n' // only for template literals
			} else if c == 'r' {
				b[i+1] = '\r' // only for template literals
			} else if c == 't' {
				b[i+1] = '\t'
			} else if c == 'f' {
				b[i+1] = '\f'
			} else if c == 'v' {
				b[i+1] = '\v'
			} else if c == 'b' {
				b[i+1] = '\b'
			}
			// remove unnecessary escape character, anything but 0x00, 0x0A, 0x0D, \, ' or "
			if start != 0 {
				j += copy(b[j:], b[start:i])
			} else {
				j = i
			}
			start = i + n
			i += n - 1
		} else if c == quote || c == '$' && quote == '`' && (i+1 < len(b) && b[i+1] == '{' || i+2 < len(b) && b[i+1] == '\\' && b[i+2] == '{') {
			// may not be escaped properly when changing quotes
			if j < start {
				// avoid append
				j += copy(b[j:], b[start:i])
				b[j] = '\\'
				j++
				start = i
			} else {
				b = append(append(b[:i], '\\'), b[i:]...)
				i++
				b[i] = c // was overwritten above
			}
		} else if c == '<' && 9 <= len(b)-1-i {
			// escape </script> (and skip over already-escaped <\/script>) so
			// the output can be embedded in an HTML <script> element
			if b[i+1] == '\\' && 10 <= len(b)-1-i && bytes.Equal(b[i+2:i+10], []byte("/script>")) {
				i += 9
			} else if bytes.Equal(b[i+1:i+9], []byte("/script>")) {
				i++
				if j < start {
					// avoid append
					j += copy(b[j:], b[start:i])
					b[j] = '\\'
					j++
					start = i
				} else {
					b = append(append(b[:i], '\\'), b[i:]...)
					i++
					b[i] = '/' // was overwritten above
				}
			}
		}
	}
	if start != 0 {
		// flush the trailing unmodified segment
		j += copy(b[j:], b[start:])
		return b[:j]
	}
	return b
}
+
// regexpEscapeTable marks the bytes whose backslash escape must be kept when
// they appear OUTSIDE a character class in a regular expression; for any
// other byte the backslash can be dropped by minifyRegExp.
var regexpEscapeTable = [256]bool{
	// ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, true, false, false, false, // $
	true, true, true, true, false, false, true, true, // (, ), *, +, ., /
	true, true, true, true, true, true, true, true, // 0, 1, 2, 3, 4, 5, 6, 7
	true, true, false, false, false, false, false, true, // 8, 9, ?
	
	false, false, true, false, true, false, false, false, // B, D
	false, false, false, false, false, false, false, false,
	true, false, false, true, false, false, false, true, // P, S, W
	false, false, false, true, true, true, true, false, // [, \, ], ^

	false, false, true, true, true, false, true, false, // b, c, d, f
	false, false, false, true, false, false, true, false, // k, n
	true, false, true, true, true, true, true, true, // p, r, s, t, u, v, w
	true, false, false, true, true, true, false, false, // x, {, |, }

	// non-ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
}
+
// regexpClassEscapeTable marks the bytes whose backslash escape must be kept
// INSIDE a character class ([...]) of a regular expression. Fewer characters
// are special inside a class than outside (e.g. *, +, ?, ( and ) are literal
// there); '-' and '^' are handled contextually in minifyRegExp.
var regexpClassEscapeTable = [256]bool{
	// ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	true, true, true, true, true, true, true, true, // 0, 1, 2, 3, 4, 5, 6, 7
	true, true, false, false, false, false, false, false, // 8, 9

	false, false, false, false, true, false, false, false, // D
	false, false, false, false, false, false, false, false,
	true, false, false, true, false, false, false, true, // P, S, W
	false, false, false, false, true, true, false, false, // \, ]

	false, false, true, true, true, false, true, false, // b, c, d, f
	false, false, false, false, false, false, true, false, // n
	true, false, true, true, true, true, true, true, // p, r, s, t, u, v, w
	true, false, false, false, false, false, false, false, // x

	// non-ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,

	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
}
+
// minifyRegExp removes unnecessary backslash escapes from a regular
// expression literal b (delimiters included, e.g. /.../flags), modifying b in
// place. Which escapes are required is looked up in regexpEscapeTable or, when
// inside a character class, regexpClassEscapeTable, with special contextual
// handling for '-' (range operator) and '^' (class negation).
func minifyRegExp(b []byte) []byte {
	inClass := false // currently inside a [...] character class
	afterDash := 0   // positions since the class start or the last unescaped '-'
	iClass := 0      // index of the '[' that opened the current class
	for i := 1; i < len(b)-1; i++ {
		if inClass {
			afterDash++
		}
		if b[i] == '\\' {
			c := b[i+1]
			escape := true
			if inClass {
				// '-' must stay escaped only where it could form a range
				// (not right after the class start, not right before ']');
				// '^' only directly after '['
				escape = regexpClassEscapeTable[c] || c == '-' && 2 < afterDash && i+2 < len(b) && b[i+2] != ']' || c == '^' && i == iClass+1
			} else {
				escape = regexpEscapeTable[c]
			}
			if !escape {
				// drop the backslash; the escaped character shifts to index i
				b = append(b[:i], b[i+1:]...)
				if inClass && 2 < afterDash && c == '-' {
					afterDash = 0
				} else if inClass && c == '^' {
					afterDash = 1
				}
			} else {
				// keep the escape and skip the escaped character
				i++
			}
		} else if b[i] == '[' {
			if b[i+1] == '^' {
				i++
			}
			afterDash = 1
			inClass = true
			iClass = i
		} else if inClass && b[i] == ']' {
			inClass = false
		} else if b[i] == '/' {
			// closing delimiter; anything after is flags
			break
		} else if inClass && 2 < afterDash && b[i] == '-' {
			afterDash = 0
		}
	}
	return b
}
+
// removeUnderscores removes all underscore digit separators from a numeric
// literal (e.g. 1_000_000 => 1000000). It modifies b in place and returns the
// possibly shortened slice.
func removeUnderscores(b []byte) []byte {
	// Single-pass in-place filter. The previous implementation removed each
	// underscore with append(b[:i], b[i+1:]...), shifting the whole tail once
	// per underscore, which is quadratic for separator-heavy literals.
	j := 0
	for i := 0; i < len(b); i++ {
		if b[i] != '_' {
			b[j] = b[i]
			j++
		}
	}
	return b[:j]
}
+
+func decimalNumber(b []byte, prec int) []byte {
+ b = removeUnderscores(b)
+ return minify.Number(b, prec)
+}
+
// binaryNumber rewrites a binary literal (0b...) as its shorter decimal
// representation and minifies it. Literals without digits or too long to fit
// an int64 are returned unchanged.
func binaryNumber(b []byte, prec int) []byte {
	b = removeUnderscores(b)
	if len(b) <= 2 || 65 < len(b) {
		// no digits after 0b, or more than 63 binary digits (int64 overflow)
		return b
	}
	var n int64
	for _, c := range b[2:] {
		n *= 2
		n += int64(c - '0')
	}
	// write the decimal digits back into b (never longer than the binary form);
	// NOTE: strconv here is the parse library's strconv (LenInt), not stdlib
	i := strconv.LenInt(n) - 1
	b = b[:i+1]
	for 0 <= i {
		b[i] = byte('0' + n%10)
		n /= 10
		i--
	}
	return minify.Number(b, prec)
}
+
// octalNumber rewrites an octal literal (0o...) as its shorter decimal
// representation and minifies it. Literals without digits or too long to fit
// an int64 are returned unchanged.
func octalNumber(b []byte, prec int) []byte {
	b = removeUnderscores(b)
	if len(b) <= 2 || 23 < len(b) {
		// no digits after 0o, or more than 21 octal digits (63 bits, int64 overflow)
		return b
	}
	var n int64
	for _, c := range b[2:] {
		n *= 8
		n += int64(c - '0')
	}
	// write the decimal digits back into b (never longer than the octal form)
	i := strconv.LenInt(n) - 1
	b = b[:i+1]
	for 0 <= i {
		b[i] = byte('0' + n%10)
		n /= 10
		i--
	}
	return minify.Number(b, prec)
}
+
// hexadecimalNumber rewrites a hexadecimal literal (0x...) as a decimal
// representation and minifies it. Literals are returned unchanged when they
// have no digits or when the decimal form could be longer than the hex form
// (more than 10 hex digits, or 10 digits starting with E/F).
func hexadecimalNumber(b []byte, prec int) []byte {
	b = removeUnderscores(b)
	if len(b) <= 2 || 12 < len(b) || len(b) == 12 && ('D' < b[2] && b[2] <= 'F' || 'd' < b[2]) {
		return b
	}
	var n int64
	for _, c := range b[2:] {
		n *= 16
		if c <= '9' {
			n += int64(c - '0')
		} else if c <= 'F' {
			n += 10 + int64(c-'A')
		} else {
			n += 10 + int64(c-'a')
		}
	}
	// write the decimal digits back into b
	i := strconv.LenInt(n) - 1
	b = b[:i+1]
	for 0 <= i {
		b[i] = byte('0' + n%10)
		n /= 10
		i--
	}
	return minify.Number(b, prec)
}
diff --git a/vendor/github.com/tdewolff/minify/v2/js/vars.go b/vendor/github.com/tdewolff/minify/v2/js/vars.go
new file mode 100644
index 0000000..81457c3
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/js/vars.go
@@ -0,0 +1,443 @@
+package js
+
+import (
+ "bytes"
+ "sort"
+
+ "github.com/tdewolff/parse/v2/js"
+)
+
// Sizes of the identifier alphabets used when generating short names:
// identStartLen characters may start a name, identContinueLen may continue it.
const identStartLen = 54
const identContinueLen = 64

// renamer generates short replacement names for declared variables.
type renamer struct {
	identStart    []byte              // characters allowed as the first character of a name
	identContinue []byte              // characters allowed after the first character
	identOrder    map[byte]int        // position of each identStart character in the alphabet
	reserved      map[string]struct{} // keywords (and similar) that may not be used as names
	rename        bool                // when false, renameScope is a no-op
}
+
// newRenamer returns a renamer. When rename is false no renaming is performed;
// when useCharFreq is set the alphabets are ordered by character frequency of
// sampled JS code, which improves gzip compression of the minified output.
func newRenamer(rename, useCharFreq bool) *renamer {
	reserved := make(map[string]struct{}, len(js.Keywords))
	for name := range js.Keywords {
		reserved[name] = struct{}{}
	}
	identStart := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$")
	identContinue := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$0123456789")
	if useCharFreq {
		// sorted based on character frequency of a collection of JS samples
		identStart = []byte("etnsoiarclduhmfpgvbjy_wOxCEkASMFTzDNLRPHIBV$WUKqYGXQZJ")
		identContinue = []byte("etnsoiarcldu14023hm8f6pg57v9bjy_wOxCEkASMFTzDNLRPHIBV$WUKqYGXQZJ")
	}
	// sanity check: alphabets must match the compile-time length constants
	if len(identStart) != identStartLen || len(identContinue) != identContinueLen {
		panic("bad identStart or identContinue lengths")
	}
	identOrder := map[byte]int{}
	for i, c := range identStart {
		identOrder[c] = i
	}
	return &renamer{
		identStart:    identStart,
		identContinue: identContinue,
		identOrder:    identOrder,
		reserved:      reserved,
		rename:        rename,
	}
}
+
// renameScope assigns generated short names to all variables declared in the
// scope, skipping names that collide with keywords or undeclared (outer)
// variables. Declarations beyond the function arguments are sorted by usage
// first (presumably so frequently used variables get the shortest names).
func (r *renamer) renameScope(scope js.Scope) {
	if !r.rename {
		return
	}

	i := 0
	// keep function argument declaration order to improve GZIP compression
	sort.Sort(js.VarsByUses(scope.Declared[scope.NumFuncArgs:]))
	for _, v := range scope.Declared {
		v.Data = r.getName(v.Data, i)
		i++
		// advance past reserved/colliding candidates
		for r.isReserved(v.Data, scope.Undeclared) {
			v.Data = r.getName(v.Data, i)
			i++
		}
	}
}
+
+func (r *renamer) isReserved(name []byte, undeclared js.VarArray) bool {
+ if 1 < len(name) { // there are no keywords or known globals that are one character long
+ if _, ok := r.reserved[string(name)]; ok {
+ return true
+ }
+ }
+ for _, v := range undeclared {
+ for v.Link != nil {
+ v = v.Link
+ }
+ if bytes.Equal(v.Data, name) {
+ return true
+ }
+ }
+ return false
+}
+
// getIndex maps a generated name back to its sequence number (the inverse of
// getName). It returns -1 when name contains a character outside the
// identifier alphabets.
func (r *renamer) getIndex(name []byte) int {
	index := 0
NameLoop:
	for i := len(name) - 1; 0 <= i; i-- {
		// the first character uses identStart, all others identContinue
		chars := r.identContinue
		if i == 0 {
			chars = r.identStart
			index *= identStartLen
		} else {
			index *= identContinueLen
		}
		for j, c := range chars {
			if name[i] == c {
				index += j
				continue NameLoop
			}
		}
		return -1
	}
	// add the count of all names shorter than this one
	for n := 0; n < len(name)-1; n++ {
		offset := identStartLen
		for i := 0; i < n; i++ {
			offset *= identContinueLen
		}
		index += offset
	}
	return index
}
+
// getName returns the index'th generated short name, reusing the name buffer
// when it has sufficient capacity. Names are enumerated shortest first.
func (r *renamer) getName(name []byte, index int) []byte {
	// Generate new names for variables where the last character is (a-zA-Z$_) and others are (a-zA-Z).
	// Thus we can have 54 one-character names and 52*54=2808 two-character names for every branch leaf.
	// That is sufficient for virtually all input.

	// one character
	if index < identStartLen {
		name[0] = r.identStart[index]
		return name[:1]
	}
	index -= identStartLen

	// two characters or more: find the required length n by subtracting the
	// number of names of each shorter length
	n := 2
	for {
		offset := identStartLen
		for i := 0; i < n-1; i++ {
			offset *= identContinueLen
		}
		if index < offset {
			break
		}
		index -= offset
		n++
	}

	if cap(name) < n {
		name = make([]byte, n)
	} else {
		name = name[:n]
	}
	// decode index into digits: first character from identStart, rest from identContinue
	name[0] = r.identStart[index%identStartLen]
	index /= identStartLen
	for i := 1; i < n; i++ {
		name[i] = r.identContinue[index%identContinueLen]
		index /= identContinueLen
	}
	return name
}
+
+////////////////////////////////////////////////////////////////
+
+func hasDefines(v *js.VarDecl) bool {
+ for _, item := range v.List {
+ if item.Default != nil {
+ return true
+ }
+ }
+ return false
+}
+
// bindingVars collects every variable declared by a binding — a plain
// variable, an array destructuring pattern, or an object destructuring
// pattern — recursing into nested patterns.
func bindingVars(ibinding js.IBinding) (vs []*js.Var) {
	switch binding := ibinding.(type) {
	case *js.Var:
		vs = append(vs, binding)
	case *js.BindingArray:
		for _, item := range binding.List {
			if item.Binding != nil {
				vs = append(vs, bindingVars(item.Binding)...)
			}
		}
		if binding.Rest != nil {
			// rest element of an array pattern may itself be a pattern
			vs = append(vs, bindingVars(binding.Rest)...)
		}
	case *js.BindingObject:
		for _, item := range binding.List {
			if item.Value.Binding != nil {
				vs = append(vs, bindingVars(item.Value.Binding)...)
			}
		}
		if binding.Rest != nil {
			// rest element of an object pattern is always a plain variable
			vs = append(vs, binding.Rest)
		}
	}
	return
}
+
// addDefinition adds binding=value to the declaration list of decl, first
// removing any existing uninitialized declarations of the same variables
// (from decl itself or from other declarations in the function scope).
func addDefinition(decl *js.VarDecl, binding js.IBinding, value js.IExpr, forward bool) {
	// see if not already defined in variable declaration list
	// if forward is set, binding=value comes before decl, otherwise the reverse holds true
	vars := bindingVars(binding)

	// remove variables in destination
RemoveVarsLoop:
	for _, vbind := range vars {
		for i, item := range decl.List {
			if v, ok := item.Binding.(*js.Var); ok && item.Default == nil && v == vbind {
				v.Uses--
				decl.List = append(decl.List[:i], decl.List[i+1:]...)
				continue RemoveVarsLoop
			}
		}

		if value != nil {
			// variable declaration must be somewhere else, find and remove it
			for _, decl2 := range decl.Scope.Func.VarDecls {
				for i, item := range decl2.List {
					if v, ok := item.Binding.(*js.Var); ok && item.Default == nil && v == vbind {
						v.Uses--
						decl2.List = append(decl2.List[:i], decl2.List[i+1:]...)
						continue RemoveVarsLoop
					}
				}
			}
		}
	}

	// add declaration to destination; forward prepends so source order is kept
	item := js.BindingElement{Binding: binding, Default: value}
	if forward {
		decl.List = append([]js.BindingElement{item}, decl.List...)
	} else {
		decl.List = append(decl.List, item)
	}
}
+
// mergeVarDecls moves all declarations from src into dst, emptying src.
func mergeVarDecls(dst, src *js.VarDecl, forward bool) {
	// Merge var declarations by moving declarations from src to dst. If forward is set, src comes first and dst after, otherwise the order is reverse.
	if forward {
		// reverse order so we can iterate from beginning to end, sometimes addDefinition may remove another declaration in the src list
		n := len(src.List) - 1
		for j := 0; j < len(src.List)/2; j++ {
			src.List[j], src.List[n-j] = src.List[n-j], src.List[j]
		}
	}
	for j := 0; j < len(src.List); j++ {
		addDefinition(dst, src.List[j].Binding, src.List[j].Default, forward)
	}
	src.List = src.List[:0]
}
+
// mergeVarDeclExprStmt tries to fold an expression statement (a var decl
// converted by hoisting, a comma expression, or a plain assignment to a
// var-declared variable) into the variable declaration decl. It reports
// whether the statement was fully merged and can be dropped by the caller.
func mergeVarDeclExprStmt(decl *js.VarDecl, exprStmt *js.ExprStmt, forward bool) bool {
	// Merge var declarations with an assignment expression. If forward is set than expr comes first and decl after, otherwise the order is reverse.
	if decl2, ok := exprStmt.Value.(*js.VarDecl); ok {
		// this happens when a variable declarations is converted to an expression due to hoisting
		mergeVarDecls(decl, decl2, forward)
		return true
	} else if commaExpr, ok := exprStmt.Value.(*js.CommaExpr); ok {
		// merge as many leading (or trailing, when !forward) items as possible
		n := 0
		for i := 0; i < len(commaExpr.List); i++ {
			item := commaExpr.List[i]
			if forward {
				item = commaExpr.List[len(commaExpr.List)-i-1]
			}
			if src, ok := item.(*js.VarDecl); ok {
				// this happens when a variable declarations is converted to an expression due to hoisting
				mergeVarDecls(decl, src, forward)
				n++
				continue
			} else if binaryExpr, ok := item.(*js.BinaryExpr); ok && binaryExpr.Op == js.EqToken {
				if v, ok := binaryExpr.X.(*js.Var); ok && v.Decl == js.VariableDecl {
					addDefinition(decl, v, binaryExpr.Y, forward)
					n++
					continue
				}
			}
			break
		}
		// fully merged only when every item of the comma expression was consumed
		merge := n == len(commaExpr.List)
		if !forward {
			commaExpr.List = commaExpr.List[n:]
		} else {
			commaExpr.List = commaExpr.List[:len(commaExpr.List)-n]
		}
		return merge
	} else if binaryExpr, ok := exprStmt.Value.(*js.BinaryExpr); ok && binaryExpr.Op == js.EqToken {
		if v, ok := binaryExpr.X.(*js.Var); ok && v.Decl == js.VariableDecl {
			addDefinition(decl, v, binaryExpr.Y, forward)
			return true
		}
	}
	return false
}
+
+func (m *jsMinifier) countHoistLength(ibinding js.IBinding) int {
+ if !m.o.KeepVarNames {
+ return len(bindingVars(ibinding)) * 2 // assume that var name will be of length one, +1 for the comma
+ }
+
+ n := 0
+ for _, v := range bindingVars(ibinding) {
+ n += len(v.Data) + 1 // +1 for the comma when added to other declaration
+ }
+ return n
+}
+
// hoistVars hoists all variable declarations in the current module/function
// scope into a single chosen declaration near the top.
// If the first statement is a var declaration, expand it. Otherwise prepend a new var declaration.
// Except for the first var declaration, all others are converted to expressions. This is possible because an ArrayBindingPattern and ObjectBindingPattern can be converted to an ArrayLiteral or ObjectLiteral respectively, as they are supersets of the BindingPatterns.
func (m *jsMinifier) hoistVars(body *js.BlockStmt) {
	if 1 < len(body.Scope.VarDecls) {
		// Select which variable declarations will be hoisted (convert to expression) and which not
		best := 0
		score := make([]int, len(body.Scope.VarDecls)) // savings if hoisted
		hoist := make([]bool, len(body.Scope.VarDecls))
		for i, varDecl := range body.Scope.VarDecls {
			hoist[i] = true
			score[i] = 4 // "var "
			if !varDecl.InForInOf {
				n := 0
				nArrays := 0
				nObjects := 0
				hasDefinitions := false
				for j, item := range varDecl.List {
					if item.Default != nil {
						// move the first array/object pattern with an initializer to the front
						if _, ok := item.Binding.(*js.BindingObject); ok {
							if j != 0 && nArrays == 0 && nObjects == 0 {
								varDecl.List[0], varDecl.List[j] = varDecl.List[j], varDecl.List[0]
							}
							nObjects++
						} else if _, ok := item.Binding.(*js.BindingArray); ok {
							if j != 0 && nArrays == 0 && nObjects == 0 {
								varDecl.List[0], varDecl.List[j] = varDecl.List[j], varDecl.List[0]
							}
							nArrays++
						}
						score[i] -= m.countHoistLength(item.Binding) // var names and commas
						hasDefinitions = true
						n++
					}
				}
				if !hasDefinitions {
					score[i] = 5 - 1 // 1 for a comma
					if varDecl.InFor {
						score[i]-- // semicolon can be reused
					}
				}
				if nObjects != 0 && !varDecl.InFor && nObjects == n {
					score[i] -= 2 // required parenthesis around braces
				}
				if nArrays != 0 || nObjects != 0 {
					score[i]-- // space after var disappears
				}
				if score[i] < score[best] || body.Scope.VarDecls[best].InForInOf {
					// select var decl with the least savings if hoisted
					best = i
				}
				if score[i] < 0 {
					hoist[i] = false
				}
			}
		}
		if body.Scope.VarDecls[best].InForInOf {
			// no savings possible
			return
		}

		decl := body.Scope.VarDecls[best]
		if 10000 < len(decl.List) {
			// skip hoisting for extremely long declaration lists
			return
		}
		hoist[best] = false

		// get original declarations
		orig := []*js.Var{}
		for _, item := range decl.List {
			orig = append(orig, bindingVars(item.Binding)...)
		}

		// hoist other variable declarations in this function scope but don't initialize yet
		j := 0
		for i, varDecl := range body.Scope.VarDecls {
			if hoist[i] {
				// mark the declaration as hoisted (it becomes an expression)
				varDecl.TokenType = js.ErrorToken
				for _, item := range varDecl.List {
					refs := bindingVars(item.Binding)
					bindingElements := make([]js.BindingElement, 0, len(refs))
				DeclaredLoop:
					for _, ref := range refs {
						// skip variables already present in the target declaration
						for _, v := range orig {
							if ref == v {
								continue DeclaredLoop
							}
						}
						bindingElements = append(bindingElements, js.BindingElement{Binding: ref, Default: nil})
						orig = append(orig, ref)

						// the variable is now undeclared in all scopes up to the function scope
						s := decl.Scope
						for s != nil && s != s.Func {
							s.AddUndeclared(ref)
							s = s.Parent
						}
						if item.Default != nil {
							ref.Uses++
						}
					}
					if i < best {
						// prepend
						decl.List = append(decl.List[:j], append(bindingElements, decl.List[j:]...)...)
						j += len(bindingElements)
					} else {
						// append
						decl.List = append(decl.List, bindingElements...)
					}
				}
			}
		}

		// rearrange to put array/object first
		var prevRefs []*js.Var
	BeginArrayObject:
		for i, item := range decl.List {
			refs := bindingVars(item.Binding)
			if _, ok := item.Binding.(*js.Var); !ok {
				// item is a destructuring pattern
				if i != 0 {
					// only move it to the front if its initializer does not
					// reference a variable defined by an earlier item
					interferes := false
					if item.Default != nil {
					InterferenceLoop:
						for _, ref := range refs {
							for _, v := range prevRefs {
								if ref == v {
									interferes = true
									break InterferenceLoop
								}
							}
						}
					}
					if !interferes {
						decl.List[0], decl.List[i] = decl.List[i], decl.List[0]
						break BeginArrayObject
					}
				} else {
					break BeginArrayObject
				}
			}
			if item.Default != nil {
				prevRefs = append(prevRefs, refs...)
			}
		}
	}
}
diff --git a/vendor/github.com/tdewolff/minify/v2/minify.go b/vendor/github.com/tdewolff/minify/v2/minify.go
new file mode 100644
index 0000000..8e2e3fd
--- /dev/null
+++ b/vendor/github.com/tdewolff/minify/v2/minify.go
@@ -0,0 +1,371 @@
+// Package minify relates MIME type to minifiers. Several minifiers are provided in the subpackages.
+package minify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "mime"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "regexp"
+ "strings"
+ "sync"
+
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/buffer"
+)
+
// Warning is used to report usage warnings such as using a deprecated feature.
var Warning = log.New(os.Stderr, "WARNING: ", 0)

// ErrNotExist is returned when no minifier exists for a given mimetype.
var ErrNotExist = errors.New("minifier does not exist for mimetype")

// ErrClosedWriter is returned when writing to a closed writer.
var ErrClosedWriter = errors.New("write on closed writer")
+
+////////////////////////////////////////////////////////////////
+
// MinifierFunc is a function that implements Minifier.
type MinifierFunc func(*M, io.Writer, io.Reader, map[string]string) error

// Minify calls f(m, w, r, params).
func (f MinifierFunc) Minify(m *M, w io.Writer, r io.Reader, params map[string]string) error {
	return f(m, w, r, params)
}

// Minifier is the interface for minifiers.
// The *M parameter is used for minifying embedded resources, such as JS within HTML.
// The params map carries mediatype parameters (e.g. charset).
type Minifier interface {
	Minify(*M, io.Writer, io.Reader, map[string]string) error
}
+
+////////////////////////////////////////////////////////////////
+
// patternMinifier pairs a mimetype regular expression with its minifier.
type patternMinifier struct {
	pattern *regexp.Regexp
	Minifier
}

// cmdMinifier wraps an external command as a Minifier.
type cmdMinifier struct {
	cmd *exec.Cmd
}

// cmdArgExtension matches a file extension directly following $in/$out in a command argument.
var cmdArgExtension = regexp.MustCompile(`^\.[0-9a-zA-Z]+`)
+
+func (c *cmdMinifier) Minify(_ *M, w io.Writer, r io.Reader, _ map[string]string) error {
+ cmd := &exec.Cmd{}
+ *cmd = *c.cmd // concurrency safety
+
+ var in, out *os.File
+ for i, arg := range cmd.Args {
+ if j := strings.Index(arg, "$in"); j != -1 {
+ var err error
+ ext := cmdArgExtension.FindString(arg[j+3:])
+ if in, err = ioutil.TempFile("", "minify-in-*"+ext); err != nil {
+ return err
+ }
+ cmd.Args[i] = arg[:j] + in.Name() + arg[j+3+len(ext):]
+ } else if j := strings.Index(arg, "$out"); j != -1 {
+ var err error
+ ext := cmdArgExtension.FindString(arg[j+4:])
+ if out, err = ioutil.TempFile("", "minify-out-*"+ext); err != nil {
+ return err
+ }
+ cmd.Args[i] = arg[:j] + out.Name() + arg[j+4+len(ext):]
+ }
+ }
+
+ if in == nil {
+ cmd.Stdin = r
+ } else if _, err := io.Copy(in, r); err != nil {
+ return err
+ }
+ if out == nil {
+ cmd.Stdout = w
+ } else {
+ defer io.Copy(w, out)
+ }
+ stderr := &bytes.Buffer{}
+ cmd.Stderr = stderr
+
+ err := cmd.Run()
+ if _, ok := err.(*exec.ExitError); ok {
+ if stderr.Len() != 0 {
+ err = fmt.Errorf("%s", stderr.String())
+ }
+ err = fmt.Errorf("command %s failed: %w", cmd.Path, err)
+ }
+ return err
+}
+
+////////////////////////////////////////////////////////////////
+
+// M holds a map of mimetype => function to allow recursive minifier calls of the minifier functions.
+type M struct {
+ mutex sync.RWMutex
+ literal map[string]Minifier
+ pattern []patternMinifier
+
+ URL *url.URL
+}
+
+// New returns a new M.
+func New() *M {
+ return &M{
+ sync.RWMutex{},
+ map[string]Minifier{},
+ []patternMinifier{},
+ nil,
+ }
+}
+
+// Add adds a minifier to the mimetype => function map (unsafe for concurrent use).
+func (m *M) Add(mimetype string, minifier Minifier) {
+ m.mutex.Lock()
+ m.literal[mimetype] = minifier
+ m.mutex.Unlock()
+}
+
+// AddFunc adds a minify function to the mimetype => function map (unsafe for concurrent use).
+func (m *M) AddFunc(mimetype string, minifier MinifierFunc) {
+ m.mutex.Lock()
+ m.literal[mimetype] = minifier
+ m.mutex.Unlock()
+}
+
+// AddRegexp adds a minifier to the mimetype => function map (unsafe for concurrent use).
+func (m *M) AddRegexp(pattern *regexp.Regexp, minifier Minifier) {
+ m.mutex.Lock()
+ m.pattern = append(m.pattern, patternMinifier{pattern, minifier})
+ m.mutex.Unlock()
+}
+
+// AddFuncRegexp adds a minify function to the mimetype => function map (unsafe for concurrent use).
+func (m *M) AddFuncRegexp(pattern *regexp.Regexp, minifier MinifierFunc) {
+ m.mutex.Lock()
+ m.pattern = append(m.pattern, patternMinifier{pattern, minifier})
+ m.mutex.Unlock()
+}
+
+// AddCmd adds a minify function to the mimetype => function map (unsafe for concurrent use) that executes a command to process the minification.
+// It allows the use of external tools like ClosureCompiler, UglifyCSS, etc. for a specific mimetype.
+func (m *M) AddCmd(mimetype string, cmd *exec.Cmd) {
+ m.mutex.Lock()
+ m.literal[mimetype] = &cmdMinifier{cmd}
+ m.mutex.Unlock()
+}
+
+// AddCmdRegexp adds a minify function to the mimetype => function map (unsafe for concurrent use) that executes a command to process the minification.
+// It allows the use of external tools like ClosureCompiler, UglifyCSS, etc. for a specific mimetype regular expression.
+func (m *M) AddCmdRegexp(pattern *regexp.Regexp, cmd *exec.Cmd) {
+ m.mutex.Lock()
+ m.pattern = append(m.pattern, patternMinifier{pattern, &cmdMinifier{cmd}})
+ m.mutex.Unlock()
+}
+
// Match returns the pattern and minifier that gets matched with the mediatype.
// It returns nil when no matching minifier exists.
// It has the same matching algorithm as Minify.
func (m *M) Match(mediatype string) (string, map[string]string, MinifierFunc) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	mimetype, params := parse.Mediatype([]byte(mediatype))
	if minifier, ok := m.literal[string(mimetype)]; ok { // string conversion is optimized away
		return string(mimetype), params, minifier.Minify
	}

	// literal mimetypes take precedence; patterns are tried in registration order
	for _, minifier := range m.pattern {
		if minifier.pattern.Match(mimetype) {
			return minifier.pattern.String(), params, minifier.Minify
		}
	}
	return string(mimetype), params, nil
}
+
// Minify minifies the content of a Reader and writes it to a Writer (safe for concurrent use).
// An error is returned when no such mimetype exists (ErrNotExist) or when an error occurred in the minifier function.
// Mediatype may take the form of 'text/plain', 'text/*', '*/*' or 'text/plain; charset=UTF-8; version=2.0'.
func (m *M) Minify(mediatype string, w io.Writer, r io.Reader) error {
	// split the mediatype into mimetype and parameters before dispatching
	mimetype, params := parse.Mediatype([]byte(mediatype))
	return m.MinifyMimetype(mimetype, w, r, params)
}
+
// MinifyMimetype minifies the content of a Reader and writes it to a Writer (safe for concurrent use).
// It is a lower level version of Minify and requires the mediatype to be split up into mimetype and parameters.
// It is mostly used internally by minifiers because it is faster (no need to convert a byte-slice to string and vice versa).
func (m *M) MinifyMimetype(mimetype []byte, w io.Writer, r io.Reader, params map[string]string) error {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	if minifier, ok := m.literal[string(mimetype)]; ok { // string conversion is optimized away
		return minifier.Minify(m, w, r, params)
	}
	// literal mimetypes take precedence; patterns are tried in registration order
	for _, minifier := range m.pattern {
		if minifier.pattern.Match(mimetype) {
			return minifier.Minify(m, w, r, params)
		}
	}
	return ErrNotExist
}
+
+// Bytes minifies an array of bytes (safe for concurrent use). When an error occurs it return the original array and the error.
+// It returns an error when no such mimetype exists (ErrNotExist) or any error occurred in the minifier function.
+func (m *M) Bytes(mediatype string, v []byte) ([]byte, error) {
+ out := buffer.NewWriter(make([]byte, 0, len(v)))
+ if err := m.Minify(mediatype, out, buffer.NewReader(v)); err != nil {
+ return v, err
+ }
+ return out.Bytes(), nil
+}
+
+// String minifies a string (safe for concurrent use). When an error occurs it return the original string and the error.
+// It returns an error when no such mimetype exists (ErrNotExist) or any error occurred in the minifier function.
+func (m *M) String(mediatype string, v string) (string, error) {
+ out := buffer.NewWriter(make([]byte, 0, len(v)))
+ if err := m.Minify(mediatype, out, buffer.NewReader([]byte(v))); err != nil {
+ return v, err
+ }
+ return string(out.Bytes()), nil
+}
+
// Reader wraps a Reader interface and minifies the stream.
// Errors from the minifier are returned by the reader.
func (m *M) Reader(mediatype string, r io.Reader) io.Reader {
	pr, pw := io.Pipe()
	go func() {
		// run the minifier in the background; errors surface on the read side
		if err := m.Minify(mediatype, pw, r); err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
	}()
	return pr
}
+
// writer makes sure that errors from the minifier are passed down through Close (can be blocking).
type writer struct {
	pw     *io.PipeWriter
	wg     sync.WaitGroup // waits for the minify goroutine started in Writer
	err    error          // set by the minify goroutine; stable after wg.Wait
	closed bool
}

// Write intercepts any writes to the writer.
// It returns ErrClosedWriter after Close has been called.
func (w *writer) Write(b []byte) (int, error) {
	if w.closed {
		return 0, ErrClosedWriter
	}
	n, err := w.pw.Write(b)
	// NOTE(review): w.err may be written concurrently by the minify goroutine;
	// reading it here without synchronization looks racy — confirm.
	if w.err != nil {
		err = w.err
	}
	return n, err
}

// Close must be called when writing has finished. It returns the error from the minifier.
// Close is idempotent and blocks until the minify goroutine has finished.
func (w *writer) Close() error {
	if !w.closed {
		w.pw.Close()
		w.wg.Wait()
		w.closed = true
	}
	return w.err
}
+
// Writer wraps a Writer interface and minifies the stream.
// Errors from the minifier are returned by Close on the writer.
// The writer must be closed explicitly.
func (m *M) Writer(mediatype string, w io.Writer) io.WriteCloser {
	pr, pw := io.Pipe()
	mw := &writer{pw, sync.WaitGroup{}, nil, false}
	mw.wg.Add(1)
	go func() {
		defer mw.wg.Done()

		// minify in the background; the caller feeds data through mw
		if err := m.Minify(mediatype, w, pr); err != nil {
			mw.err = err
		}
		pr.Close()
	}()
	return mw
}
+
// responseWriter wraps an http.ResponseWriter and makes sure that errors from the minifier are passed down through Close (can be blocking).
// All writes to the response writer are intercepted and minified on the fly.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
type responseWriter struct {
	http.ResponseWriter

	writer    *writer // lazily created on the first Write
	m         *M
	mediatype string // fallback mediatype, derived from the request URI extension
}

// WriteHeader intercepts any header writes and removes the Content-Length header
// (the minified body length is not known up front).
func (w *responseWriter) WriteHeader(status int) {
	w.ResponseWriter.Header().Del("Content-Length")
	w.ResponseWriter.WriteHeader(status)
}
+
// Write intercepts any writes to the response writer.
// The first write will extract the Content-Type as the mediatype. Otherwise it falls back to the RequestURI extension.
func (w *responseWriter) Write(b []byte) (int, error) {
	if w.writer == nil {
		// first write: lock in the mediatype and start the minifier pipeline
		if mediatype := w.ResponseWriter.Header().Get("Content-Type"); mediatype != "" {
			w.mediatype = mediatype
		}
		w.writer = w.m.Writer(w.mediatype, w.ResponseWriter).(*writer)
	}
	return w.writer.Write(b)
}

// Close must be called when writing has finished. It returns the error from the minifier.
// It is a no-op when nothing was ever written.
func (w *responseWriter) Close() error {
	if w.writer != nil {
		return w.writer.Close()
	}
	return nil
}
+
// ResponseWriter minifies any writes to the http.ResponseWriter.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
// Minification might be slower than just sending the original file! Caching is advised.
func (m *M) ResponseWriter(w http.ResponseWriter, r *http.Request) *responseWriter {
	// derive a fallback mediatype from the request path extension; the first
	// Write may override it with the Content-Type header
	mediatype := mime.TypeByExtension(path.Ext(r.RequestURI))
	return &responseWriter{w, nil, m, mediatype}
}
+
// Middleware provides a middleware function that minifies content on the fly by intercepting writes to http.ResponseWriter.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
// Minification might be slower than just sending the original file! Caching is advised.
// NOTE: any minifier error returned by Close is discarded here; use
// MiddlewareWithError to handle minification errors.
func (m *M) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mw := m.ResponseWriter(w, r)
		next.ServeHTTP(mw, r)
		mw.Close()
	})
}

// MiddlewareWithError provides a middleware function that minifies content on the fly by intercepting writes to http.ResponseWriter. The error function allows handling minification errors.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
// Minification might be slower than just sending the original file! Caching is advised.
func (m *M) MiddlewareWithError(next http.Handler, errorFunc func(w http.ResponseWriter, r *http.Request, err error)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mw := m.ResponseWriter(w, r)
		next.ServeHTTP(mw, r)
		if err := mw.Close(); err != nil {
			errorFunc(w, r, err)
			return
		}
	})
}
diff --git a/vendor/github.com/tdewolff/parse/v2/.gitattributes b/vendor/github.com/tdewolff/parse/v2/.gitattributes
new file mode 100644
index 0000000..9f4b74c
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/.gitattributes
@@ -0,0 +1 @@
+tests/*/corpus/* linguist-generated
diff --git a/vendor/github.com/tdewolff/parse/v2/.gitignore b/vendor/github.com/tdewolff/parse/v2/.gitignore
new file mode 100644
index 0000000..6144b69
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/.gitignore
@@ -0,0 +1,5 @@
+tests/*/fuzz-fuzz.zip
+tests/*/crashers
+tests/*/suppressions
+tests/*/corpus/*
+!tests/*/corpus/*.*
diff --git a/vendor/github.com/tdewolff/parse/v2/.golangci.yml b/vendor/github.com/tdewolff/parse/v2/.golangci.yml
new file mode 100644
index 0000000..7009f92
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/.golangci.yml
@@ -0,0 +1,16 @@
+linters:
+ enable:
+ - depguard
+ - dogsled
+ - gofmt
+ - goimports
+ - golint
+ - gosec
+ - govet
+ - megacheck
+ - misspell
+ - nakedret
+ - prealloc
+ - unconvert
+ - unparam
+ - wastedassign
diff --git a/vendor/github.com/tdewolff/parse/v2/LICENSE.md b/vendor/github.com/tdewolff/parse/v2/LICENSE.md
new file mode 100644
index 0000000..41677de
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/LICENSE.md
@@ -0,0 +1,22 @@
+Copyright (c) 2015 Taco de Wolff
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/tdewolff/parse/v2/README.md b/vendor/github.com/tdewolff/parse/v2/README.md
new file mode 100644
index 0000000..837c281
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/README.md
@@ -0,0 +1,64 @@
+# Parse [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/parse/v2?tab=doc) [![Go Report Card](https://goreportcard.com/badge/github.com/tdewolff/parse)](https://goreportcard.com/report/github.com/tdewolff/parse) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/parse/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/parse?branch=master) [![Donate](https://img.shields.io/badge/patreon-donate-DFB317)](https://www.patreon.com/tdewolff)
+
+This package contains several lexers and parsers written in [Go][1]. All subpackages are built to be streaming, high performance and to be in accordance with the official (latest) specifications.
+
+The lexers are implemented using `buffer.Lexer` in https://github.com/tdewolff/parse/buffer and the parsers work on top of the lexers. Some subpackages have hashes defined (using [Hasher](https://github.com/tdewolff/hasher)) that speed up common byte-slice comparisons.
+
+## Buffer
+### Reader
+Reader is a wrapper around a `[]byte` that implements the `io.Reader` interface. It is comparable to `bytes.Reader` but has slightly different semantics (and a slightly smaller memory footprint).
+
+### Writer
+Writer is a buffer that implements the `io.Writer` interface and expands the buffer as needed. The reset functionality allows for better memory reuse. After calling `Reset`, it will overwrite the current buffer and thus reduce allocations.
+
+### Lexer
+Lexer is a read buffer specifically designed for building lexers. It keeps track of two positions: a start and end position. The start position is the beginning of the current token being parsed, the end position is being moved forward until a valid token is found. Calling `Shift` will collapse the positions to the end and return the parsed `[]byte`.
+
+Moving the end position can go through `Move(int)` which also accepts negative integers. One can also use `Pos() int` to try and parse a token, and if it fails rewind with `Rewind(int)`, passing the previously saved position.
+
+`Peek(int) byte` will peek forward (relative to the end position) and return the byte at that location. `PeekRune(int) (rune, int)` returns UTF-8 runes and its length at the given **byte** position. Upon an error `Peek` will return `0`, the **user must peek at every character** and not skip any, otherwise it may skip a `0` and panic on out-of-bounds indexing.
+
+`Lexeme() []byte` will return the currently selected bytes, `Skip()` will collapse the selection. `Shift() []byte` is a combination of `Lexeme() []byte` and `Skip()`.
+
+When the passed `io.Reader` returned an error, `Err() error` will return that error even if not at the end of the buffer.
+
+### StreamLexer
+StreamLexer behaves like Lexer but uses a buffer pool to read in chunks from `io.Reader`, retaining old buffers in memory that are still in use, and re-using old buffers otherwise. Calling `Free(n int)` frees up `n` bytes from the internal buffer(s). It holds an array of buffers to accommodate for keeping everything in-memory. Calling `ShiftLen() int` returns the number of bytes that have been shifted since the previous call to `ShiftLen`, which can be used to specify how many bytes need to be freed up from the buffer. If you don't need to keep returned byte slices around, call `Free(ShiftLen())` after every `Shift` call.
+
+## Strconv
+This package contains string conversion functions much like the standard library's `strconv` package, but it is specifically tailored for the performance needs within the `minify` package.
+
+For example, the floating-point to string conversion function is approximately twice as fast as the standard library, but it is not as precise.
+
+## CSS
+This package is a CSS3 lexer and parser. Both follow the specification at [CSS Syntax Module Level 3](http://www.w3.org/TR/css-syntax-3/). The lexer takes an io.Reader and converts it into tokens until the EOF. The parser returns a parse tree of the full io.Reader input stream, but the low-level `Next` function can be used for stream parsing to return grammar units until the EOF.
+
+[See README here](https://github.com/tdewolff/parse/tree/master/css).
+
+## HTML
+This package is an HTML5 lexer. It follows the specification at [The HTML syntax](http://www.w3.org/TR/html5/syntax.html). The lexer takes an io.Reader and converts it into tokens until the EOF.
+
+[See README here](https://github.com/tdewolff/parse/tree/master/html).
+
+## JS
+This package is a JS lexer (ECMA-262, edition 6.0). It follows the specification at [ECMAScript Language Specification](http://www.ecma-international.org/ecma-262/6.0/). The lexer takes an io.Reader and converts it into tokens until the EOF.
+
+[See README here](https://github.com/tdewolff/parse/tree/master/js).
+
+## JSON
+This package is a JSON parser (ECMA-404). It follows the specification at [JSON](http://json.org/). The parser takes an io.Reader and converts it into tokens until the EOF.
+
+[See README here](https://github.com/tdewolff/parse/tree/master/json).
+
+## SVG
+This package contains common hashes for SVG1.1 tags and attributes.
+
+## XML
+This package is an XML1.0 lexer. It follows the specification at [Extensible Markup Language (XML) 1.0 (Fifth Edition)](http://www.w3.org/TR/xml/). The lexer takes an io.Reader and converts it into tokens until the EOF.
+
+[See README here](https://github.com/tdewolff/parse/tree/master/xml).
+
+## License
+Released under the [MIT license](LICENSE.md).
+
+[1]: http://golang.org/ "Go Language"
diff --git a/vendor/github.com/tdewolff/parse/v2/buffer/buffer.go b/vendor/github.com/tdewolff/parse/v2/buffer/buffer.go
new file mode 100644
index 0000000..671b380
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/buffer/buffer.go
@@ -0,0 +1,12 @@
+// Package buffer contains buffer and wrapper types for byte slices. It is useful for writing lexers or other high-performance byte slice handling.
+// The `Reader` and `Writer` types implement the `io.Reader` and `io.Writer` respectively and provide a thinner and faster interface than `bytes.Buffer`.
+// The `Lexer` type is useful for building lexers because it keeps track of the start and end position of a byte selection, and shifts the bytes whenever a valid token is found.
+// The `StreamLexer` does the same, but keeps a buffer pool so that it reads a limited amount at a time, allowing to parse from streaming sources.
+package buffer
+
+// defaultBufSize specifies the default initial length of internal buffers.
+var defaultBufSize = 4096
+
+// MinBuf specifies the default initial length of internal buffers.
+// Solely here to support old versions of parse.
+var MinBuf = defaultBufSize
diff --git a/vendor/github.com/tdewolff/parse/v2/buffer/lexer.go b/vendor/github.com/tdewolff/parse/v2/buffer/lexer.go
new file mode 100644
index 0000000..46e6bda
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/buffer/lexer.go
@@ -0,0 +1,164 @@
+package buffer
+
+import (
+ "io"
+ "io/ioutil"
+)
+
+var nullBuffer = []byte{0}
+
+// Lexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
+// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
+type Lexer struct {
+ buf []byte
+ pos int // index in buf
+ start int // index in buf
+ err error
+
+ restore func()
+}
+
+// NewLexer returns a new Lexer for a given io.Reader, and uses ioutil.ReadAll to read it into a byte slice.
+// If the io.Reader implements Bytes, that is used instead.
+// It will append a NULL at the end of the buffer.
+func NewLexer(r io.Reader) *Lexer {
+	var b []byte
+	if r != nil {
+		// Fast path: readers that expose Bytes() (e.g. buffer.Reader,
+		// bytes.Buffer) let us borrow their slice without copying.
+		if buffer, ok := r.(interface {
+			Bytes() []byte
+		}); ok {
+			b = buffer.Bytes()
+		} else {
+			var err error
+			b, err = ioutil.ReadAll(r)
+			if err != nil {
+				// Return a lexer holding only the NULL sentinel; the read
+				// error is surfaced later via Err()/PeekErr().
+				return &Lexer{
+					buf: nullBuffer,
+					err: err,
+				}
+			}
+		}
+	}
+	return NewLexerBytes(b)
+}
+
+// NewLexerBytes returns a new Lexer for a given byte slice, and appends NULL at the end.
+// To avoid reallocation, make sure the capacity has room for one more byte.
+func NewLexerBytes(b []byte) *Lexer {
+	z := &Lexer{
+		buf: b,
+	}
+
+	n := len(b)
+	if n == 0 {
+		z.buf = nullBuffer
+	} else {
+		// Append NULL to buffer, but try to avoid reallocation
+		if cap(b) > n {
+			// Overwrite next byte but restore when done
+			b = b[:n+1]
+			c := b[n]
+			b[n] = 0
+
+			z.buf = b
+			// Restore() puts the clobbered byte back; this matters when the
+			// caller's slice aliases memory that is still in use elsewhere.
+			z.restore = func() {
+				b[n] = c
+			}
+		} else {
+			// no spare capacity: appending reallocates, leaving the caller's
+			// slice untouched, so no restore is needed
+			z.buf = append(b, 0)
+		}
+	}
+	return z
+}
+
+// Restore restores the replaced byte past the end of the buffer by NULL.
+func (z *Lexer) Restore() {
+ if z.restore != nil {
+ z.restore()
+ z.restore = nil
+ }
+}
+
+// Err returns the error returned from io.Reader or io.EOF when the end has been reached.
+func (z *Lexer) Err() error {
+ return z.PeekErr(0)
+}
+
+// PeekErr returns the error at position pos. When pos is zero, this is the same as calling Err().
+func (z *Lexer) PeekErr(pos int) error {
+	if z.err != nil {
+		return z.err
+	} else if z.pos+pos >= len(z.buf)-1 {
+		// len(buf)-1 excludes the NULL sentinel appended at construction
+		return io.EOF
+	}
+	return nil
+}
+
+// Peek returns the ith byte relative to the end position.
+// Peek returns 0 when an error has occurred, Err returns the error.
+func (z *Lexer) Peek(pos int) byte {
+	pos += z.pos
+	// No bounds check: the buffer is NULL-terminated, and callers are
+	// expected to stop at the first 0 byte; peeking past the sentinel
+	// without checking each byte will panic on out-of-bounds indexing.
+	return z.buf[pos]
+}
+
+// PeekRune returns the rune and rune length of the ith byte relative to the end position.
+func (z *Lexer) PeekRune(pos int) (rune, int) {
+	// from unicode/utf8
+	// The z.Peek(pos+k) == 0 checks guard against a multi-byte sequence
+	// running past the NULL sentinel at the end of the buffer.
+	c := z.Peek(pos)
+	if c < 0xC0 || z.Peek(pos+1) == 0 {
+		return rune(c), 1 // ASCII, continuation, or truncated sequence
+	} else if c < 0xE0 || z.Peek(pos+2) == 0 {
+		return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2 // 2-byte sequence
+	} else if c < 0xF0 || z.Peek(pos+3) == 0 {
+		return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3 // 3-byte sequence
+	}
+	return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4 // 4-byte sequence
+}
+
+// Move advances the position.
+func (z *Lexer) Move(n int) {
+ z.pos += n
+}
+
+// Pos returns a mark to which can be rewinded.
+func (z *Lexer) Pos() int {
+ return z.pos - z.start
+}
+
+// Rewind rewinds the position to the given position.
+func (z *Lexer) Rewind(pos int) {
+ z.pos = z.start + pos
+}
+
+// Lexeme returns the bytes of the current selection.
+func (z *Lexer) Lexeme() []byte {
+ return z.buf[z.start:z.pos:z.pos]
+}
+
+// Skip collapses the position to the end of the selection.
+func (z *Lexer) Skip() {
+ z.start = z.pos
+}
+
+// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
+func (z *Lexer) Shift() []byte {
+ b := z.buf[z.start:z.pos:z.pos]
+ z.start = z.pos
+ return b
+}
+
+// Offset returns the character position in the buffer.
+func (z *Lexer) Offset() int {
+ return z.pos
+}
+
+// Bytes returns the underlying buffer.
+func (z *Lexer) Bytes() []byte {
+ return z.buf[: len(z.buf)-1 : len(z.buf)-1]
+}
+
+// Reset resets position to the underlying buffer.
+func (z *Lexer) Reset() {
+ z.start = 0
+ z.pos = 0
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/buffer/reader.go b/vendor/github.com/tdewolff/parse/v2/buffer/reader.go
new file mode 100644
index 0000000..9926eef
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/buffer/reader.go
@@ -0,0 +1,44 @@
+package buffer
+
+import "io"
+
+// Reader implements an io.Reader over a byte slice.
+type Reader struct {
+ buf []byte
+ pos int
+}
+
+// NewReader returns a new Reader for a given byte slice.
+func NewReader(buf []byte) *Reader {
+ return &Reader{
+ buf: buf,
+ }
+}
+
+// Read reads bytes into the given byte slice and returns the number of bytes read and an error if occurred.
+func (r *Reader) Read(b []byte) (n int, err error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ if r.pos >= len(r.buf) {
+ return 0, io.EOF
+ }
+ n = copy(b, r.buf[r.pos:])
+ r.pos += n
+ return
+}
+
+// Bytes returns the underlying byte slice.
+func (r *Reader) Bytes() []byte {
+ return r.buf
+}
+
+// Reset resets the position of the read pointer to the beginning of the underlying byte slice.
+func (r *Reader) Reset() {
+ r.pos = 0
+}
+
+// Len returns the length of the buffer.
+func (r *Reader) Len() int {
+ return len(r.buf)
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/buffer/streamlexer.go b/vendor/github.com/tdewolff/parse/v2/buffer/streamlexer.go
new file mode 100644
index 0000000..5ea2dd5
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/buffer/streamlexer.go
@@ -0,0 +1,223 @@
+package buffer
+
+import (
+ "io"
+)
+
+type block struct {
+ buf []byte
+ next int // index in pool plus one
+ active bool
+}
+
+type bufferPool struct {
+ pool []block
+ head int // index in pool plus one
+ tail int // index in pool plus one
+
+ pos int // byte pos in tail
+}
+
+// swap stores oldBuf into the pool as a live (active) block and returns an
+// empty buffer with at least size capacity, reusing an inactive pooled
+// buffer when one is available.
+func (z *bufferPool) swap(oldBuf []byte, size int) []byte {
+	// find new buffer that can be reused
+	swap := -1
+	for i := 0; i < len(z.pool); i++ {
+		if !z.pool[i].active && size <= cap(z.pool[i].buf) {
+			swap = i
+			break
+		}
+	}
+	if swap == -1 { // no free buffer found for reuse
+		if z.tail == 0 && z.pos >= len(oldBuf) && size <= cap(oldBuf) { // but we can reuse the current buffer!
+			z.pos -= len(oldBuf)
+			return oldBuf[:0]
+		}
+		// allocate new
+		z.pool = append(z.pool, block{make([]byte, 0, size), 0, true})
+		swap = len(z.pool) - 1
+	}
+
+	newBuf := z.pool[swap].buf
+
+	// put current buffer into pool
+	z.pool[swap] = block{oldBuf, 0, true}
+	// link the stored buffer into the head/tail chain of live buffers;
+	// indices are stored plus one so that zero means "none"
+	if z.head != 0 {
+		z.pool[z.head-1].next = swap + 1
+	}
+	z.head = swap + 1
+	if z.tail == 0 {
+		z.tail = swap + 1
+	}
+
+	return newBuf[:0]
+}
+
+// free advances the consumed position by n bytes and marks any fully
+// consumed buffers as inactive so swap may reuse them.
+func (z *bufferPool) free(n int) {
+	z.pos += n
+	// move the tail over to next buffers
+	for z.tail != 0 && z.pos >= len(z.pool[z.tail-1].buf) {
+		z.pos -= len(z.pool[z.tail-1].buf)
+		newTail := z.pool[z.tail-1].next
+		z.pool[z.tail-1].active = false // after this, any thread may pick up the inactive buffer, so it can't be used anymore
+		z.tail = newTail
+	}
+	if z.tail == 0 {
+		// chain is empty; reset head as well
+		z.head = 0
+	}
+}
+
+// StreamLexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
+// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
+type StreamLexer struct {
+ r io.Reader
+ err error
+
+ pool bufferPool
+
+ buf []byte
+ start int // index in buf
+ pos int // index in buf
+ prevStart int
+
+ free int
+}
+
+// NewStreamLexer returns a new StreamLexer for a given io.Reader with a 4kB estimated buffer size.
+// If the io.Reader implements Bytes, that buffer is used instead.
+func NewStreamLexer(r io.Reader) *StreamLexer {
+ return NewStreamLexerSize(r, defaultBufSize)
+}
+
+// NewStreamLexerSize returns a new StreamLexer for a given io.Reader and estimated required buffer size.
+// If the io.Reader implements Bytes, that buffer is used instead.
+func NewStreamLexerSize(r io.Reader, size int) *StreamLexer {
+ // if reader has the bytes in memory already, use that instead
+ if buffer, ok := r.(interface {
+ Bytes() []byte
+ }); ok {
+ return &StreamLexer{
+ err: io.EOF,
+ buf: buffer.Bytes(),
+ }
+ }
+ return &StreamLexer{
+ r: r,
+ buf: make([]byte, 0, size),
+ }
+}
+
+// read returns the byte at pos, swapping in a (possibly larger) buffer and
+// reading more data from the underlying io.Reader when pos lies beyond what
+// has been buffered so far. It returns 0 on error or when pos is past EOF.
+func (z *StreamLexer) read(pos int) byte {
+	if z.err != nil {
+		return 0
+	}
+
+	// free unused bytes
+	z.pool.free(z.free)
+	z.free = 0
+
+	// get new buffer
+	c := cap(z.buf)
+	p := pos - z.start + 1
+	if 2*p > c { // if the token is larger than half the buffer, increase buffer size
+		c = 2*c + p
+	}
+	d := len(z.buf) - z.start
+	buf := z.pool.swap(z.buf[:z.start], c)
+	copy(buf[:d], z.buf[z.start:]) // copy the left-overs (unfinished token) from the old buffer
+
+	// read in new data for the rest of the buffer
+	var n int
+	for pos-z.start >= d && z.err == nil {
+		n, z.err = z.r.Read(buf[d:cap(buf)])
+		d += n
+	}
+	// rebase all indices onto the new buffer, whose origin is the old z.start
+	pos -= z.start
+	z.pos -= z.start
+	z.start, z.buf = 0, buf[:d]
+	if pos >= d {
+		return 0
+	}
+	return z.buf[pos]
+}
+
+// Err returns the error returned from io.Reader. It may still return valid bytes for a while though.
+func (z *StreamLexer) Err() error {
+ if z.err == io.EOF && z.pos < len(z.buf) {
+ return nil
+ }
+ return z.err
+}
+
+// Free frees up bytes of length n from previously shifted tokens.
+// Each call to Shift should at one point be followed by a call to Free with a length returned by ShiftLen.
+func (z *StreamLexer) Free(n int) {
+ z.free += n
+}
+
+// Peek returns the ith byte relative to the end position and possibly does an allocation.
+// Peek returns zero when an error has occurred, Err returns the error.
+// TODO: inline function
+func (z *StreamLexer) Peek(pos int) byte {
+ pos += z.pos
+ if uint(pos) < uint(len(z.buf)) { // uint for BCE
+ return z.buf[pos]
+ }
+ return z.read(pos)
+}
+
+// PeekRune returns the rune and rune length of the ith byte relative to the end position.
+func (z *StreamLexer) PeekRune(pos int) (rune, int) {
+ // from unicode/utf8
+ c := z.Peek(pos)
+ if c < 0xC0 {
+ return rune(c), 1
+ } else if c < 0xE0 {
+ return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
+ } else if c < 0xF0 {
+ return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
+ }
+ return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
+}
+
+// Move advances the position.
+func (z *StreamLexer) Move(n int) {
+ z.pos += n
+}
+
+// Pos returns a mark to which can be rewinded.
+func (z *StreamLexer) Pos() int {
+ return z.pos - z.start
+}
+
+// Rewind rewinds the position to the given position.
+func (z *StreamLexer) Rewind(pos int) {
+ z.pos = z.start + pos
+}
+
+// Lexeme returns the bytes of the current selection.
+func (z *StreamLexer) Lexeme() []byte {
+ return z.buf[z.start:z.pos]
+}
+
+// Skip collapses the position to the end of the selection.
+func (z *StreamLexer) Skip() {
+ z.start = z.pos
+}
+
+// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
+// It also returns the number of bytes we moved since the last call to Shift. This can be used in calls to Free.
+func (z *StreamLexer) Shift() []byte {
+ if z.pos > len(z.buf) { // make sure we peeked at least as much as we shift
+ z.read(z.pos - 1)
+ }
+ b := z.buf[z.start:z.pos]
+ z.start = z.pos
+ return b
+}
+
+// ShiftLen returns the number of bytes moved since the last call to ShiftLen. This can be used in calls to Free because it takes into account multiple Shifts or Skips.
+func (z *StreamLexer) ShiftLen() int {
+ n := z.start - z.prevStart
+ z.prevStart = z.start
+ return n
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/buffer/writer.go b/vendor/github.com/tdewolff/parse/v2/buffer/writer.go
new file mode 100644
index 0000000..6c94201
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/buffer/writer.go
@@ -0,0 +1,65 @@
+package buffer
+
+import (
+ "io"
+)
+
+// Writer implements an io.Writer over a byte slice.
+type Writer struct {
+ buf []byte
+ err error
+ expand bool
+}
+
+// NewWriter returns a new Writer for a given byte slice.
+func NewWriter(buf []byte) *Writer {
+ return &Writer{
+ buf: buf,
+ expand: true,
+ }
+}
+
+// NewStaticWriter returns a new Writer for a given byte slice. It does not reallocate and expand the byte-slice.
+func NewStaticWriter(buf []byte) *Writer {
+ return &Writer{
+ buf: buf,
+ expand: false,
+ }
+}
+
+// Write writes bytes from the given byte slice and returns the number of bytes written and an error if occurred. When err != nil, n == 0.
+func (w *Writer) Write(b []byte) (int, error) {
+	n := len(b)
+	end := len(w.buf)
+	if end+n > cap(w.buf) {
+		if !w.expand {
+			// static writer: signal "buffer full" with io.EOF, drop the
+			// write, and remember the error for Close()
+			w.err = io.EOF
+			return 0, io.EOF
+		}
+		// grow geometrically (2*cap+n) to amortize repeated writes
+		buf := make([]byte, end, 2*cap(w.buf)+n)
+		copy(buf, w.buf)
+		w.buf = buf
+	}
+	w.buf = w.buf[:end+n]
+	return copy(w.buf[end:], b), nil
+}
+
+// Len returns the length of the underlying byte slice.
+func (w *Writer) Len() int {
+ return len(w.buf)
+}
+
+// Bytes returns the underlying byte slice.
+func (w *Writer) Bytes() []byte {
+ return w.buf
+}
+
+// Reset empties and reuses the current buffer. Subsequent writes will overwrite the buffer, so any reference to the underlying slice is invalidated after this call.
+func (w *Writer) Reset() {
+ w.buf = w.buf[:0]
+}
+
+// Close returns the last error.
+func (w *Writer) Close() error {
+ return w.err
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/common.go b/vendor/github.com/tdewolff/parse/v2/common.go
new file mode 100644
index 0000000..da46cc3
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/common.go
@@ -0,0 +1,237 @@
+// Package parse contains a collection of parsers for various formats in its subpackages.
+package parse
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+)
+
+var (
+ dataSchemeBytes = []byte("data:")
+ base64Bytes = []byte("base64")
+ textMimeBytes = []byte("text/plain")
+)
+
+// ErrBadDataURI is returned by DataURI when the byte slice does not start with 'data:' or is too short.
+var ErrBadDataURI = errors.New("not a data URI")
+
+// Number returns the number of bytes that parse as a number of the regex format (+|-)?([0-9]+(\.[0-9]+)?|\.[0-9]+)((e|E)(+|-)?[0-9]+)?.
+func Number(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	i := 0
+	// optional sign; a lone sign is not a number
+	if b[i] == '+' || b[i] == '-' {
+		i++
+		if i >= len(b) {
+			return 0
+		}
+	}
+	// integer part
+	firstDigit := (b[i] >= '0' && b[i] <= '9')
+	if firstDigit {
+		i++
+		for i < len(b) && b[i] >= '0' && b[i] <= '9' {
+			i++
+		}
+	}
+	// fractional part; requires at least one digit after the dot
+	if i < len(b) && b[i] == '.' {
+		i++
+		if i < len(b) && b[i] >= '0' && b[i] <= '9' {
+			i++
+			for i < len(b) && b[i] >= '0' && b[i] <= '9' {
+				i++
+			}
+		} else if firstDigit {
+			// . could belong to the next token
+			i--
+			return i
+		} else {
+			return 0
+		}
+	} else if !firstDigit {
+		return 0
+	}
+	// exponent part; consumed only when followed by at least one digit
+	iOld := i
+	if i < len(b) && (b[i] == 'e' || b[i] == 'E') {
+		i++
+		if i < len(b) && (b[i] == '+' || b[i] == '-') {
+			i++
+		}
+		if i >= len(b) || b[i] < '0' || b[i] > '9' {
+			// e could belong to next token
+			return iOld
+		}
+		for i < len(b) && b[i] >= '0' && b[i] <= '9' {
+			i++
+		}
+	}
+	return i
+}
+
+// Dimension parses a byte-slice and returns the length of the number and its unit.
+func Dimension(b []byte) (int, int) {
+	num := Number(b)
+	if num == 0 || num == len(b) {
+		return num, 0 // no number, or the number consumes the whole slice
+	} else if b[num] == '%' {
+		return num, 1 // percentage unit
+	} else if b[num] >= 'a' && b[num] <= 'z' || b[num] >= 'A' && b[num] <= 'Z' {
+		// alphabetic unit (e.g. px, em); consume all ASCII letters
+		i := num + 1
+		for i < len(b) && (b[i] >= 'a' && b[i] <= 'z' || b[i] >= 'A' && b[i] <= 'Z') {
+			i++
+		}
+		return num, i - num
+	}
+	return num, 0
+}
+
+// Mediatype parses a given mediatype and splits the mimetype from the parameters.
+// It works similar to mime.ParseMediaType but is faster.
+func Mediatype(b []byte) ([]byte, map[string]string) {
+	// skip leading spaces
+	i := 0
+	for i < len(b) && b[i] == ' ' {
+		i++
+	}
+	b = b[i:]
+	n := len(b)
+	mimetype := b
+	var params map[string]string
+	for i := 3; i < n; i++ { // mimetype is at least three characters long
+		if b[i] == ';' || b[i] == ' ' {
+			mimetype = b[:i]
+			if b[i] == ' ' {
+				i++ // space
+				for i < n && b[i] == ' ' {
+					i++
+				}
+				// only spaces follow the mimetype: no parameters
+				if n <= i || b[i] != ';' {
+					break
+				}
+			}
+			// parse ;key=value parameters; values are taken verbatim up to
+			// the next ';' or ' ' (no quote handling)
+			params = map[string]string{}
+			s := string(b)
+		PARAM:
+			i++ // semicolon
+			for i < n && s[i] == ' ' {
+				i++
+			}
+			start := i
+			for i < n && s[i] != '=' && s[i] != ';' && s[i] != ' ' {
+				i++
+			}
+			key := s[start:i]
+			for i < n && s[i] == ' ' {
+				i++
+			}
+			if i < n && s[i] == '=' {
+				i++
+				for i < n && s[i] == ' ' {
+					i++
+				}
+				start = i
+				for i < n && s[i] != ';' && s[i] != ' ' {
+					i++
+				}
+			} else {
+				// key without '=': recorded with an empty value
+				start = i
+			}
+			params[key] = s[start:i]
+			for i < n && s[i] == ' ' {
+				i++
+			}
+			if i < n && s[i] == ';' {
+				goto PARAM
+			}
+			break
+		}
+	}
+	return mimetype, params
+}
+
+// DataURI parses the given data URI and returns the mediatype, data and ok.
+func DataURI(dataURI []byte) ([]byte, []byte, error) {
+	if len(dataURI) > 5 && bytes.Equal(dataURI[:5], dataSchemeBytes) {
+		dataURI = dataURI[5:]
+		inBase64 := false
+		var mediatype []byte
+		i := 0
+		// scan the mediatype/parameter section up to the first comma, which
+		// separates metadata from the payload
+		for j := 0; j < len(dataURI); j++ {
+			c := dataURI[j]
+			if c == '=' || c == ';' || c == ',' {
+				if c != '=' && bytes.Equal(TrimWhitespace(dataURI[i:j]), base64Bytes) {
+					// a bare "base64" token marks the payload encoding and is
+					// stripped from the reported mediatype (drop its ';')
+					if len(mediatype) > 0 {
+						mediatype = mediatype[:len(mediatype)-1]
+					}
+					inBase64 = true
+					i = j
+				} else if c != ',' {
+					mediatype = append(append(mediatype, TrimWhitespace(dataURI[i:j])...), c)
+					i = j + 1
+				} else {
+					mediatype = append(mediatype, TrimWhitespace(dataURI[i:j])...)
+				}
+				if c == ',' {
+					// an empty mediatype defaults to text/plain (cf. RFC 2397)
+					if len(mediatype) == 0 || mediatype[0] == ';' {
+						mediatype = textMimeBytes
+					}
+					data := dataURI[j+1:]
+					if inBase64 {
+						decoded := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
+						n, err := base64.StdEncoding.Decode(decoded, data)
+						if err != nil {
+							return nil, nil, err
+						}
+						data = decoded[:n]
+					} else {
+						// non-base64 payloads are percent-decoded
+						data = DecodeURL(data)
+					}
+					return mediatype, data, nil
+				}
+			}
+		}
+	}
+	// missing "data:" prefix, too short, or no comma found
+	return nil, nil, ErrBadDataURI
+}
+
+// QuoteEntity parses the given byte slice and returns the quote that got matched (' or ") and its entity length.
+// TODO: deprecated
+func QuoteEntity(b []byte) (quote byte, n int) {
+	if len(b) < 5 || b[0] != '&' {
+		return 0, 0
+	}
+	if b[1] == '#' {
+		if b[2] == 'x' {
+			// hexadecimal numeric entity; leading zeros are permitted
+			i := 3
+			for i < len(b) && b[i] == '0' {
+				i++
+			}
+			if i+2 < len(b) && b[i] == '2' && b[i+2] == ';' {
+				if b[i+1] == '2' {
+					return '"', i + 3 // &#x22;
+				} else if b[i+1] == '7' {
+					return '\'', i + 3 // &#x27;
+				}
+			}
+		} else {
+			// decimal numeric entity; leading zeros are permitted
+			i := 2
+			for i < len(b) && b[i] == '0' {
+				i++
+			}
+			if i+2 < len(b) && b[i] == '3' && b[i+2] == ';' {
+				if b[i+1] == '4' {
+					return '"', i + 3 // &#34;
+				} else if b[i+1] == '9' {
+					return '\'', i + 3 // &#39;
+				}
+			}
+		}
+	} else if len(b) >= 6 && b[5] == ';' {
+		// named entities
+		if bytes.Equal(b[1:5], []byte{'q', 'u', 'o', 't'}) {
+			return '"', 6 // &quot;
+		} else if bytes.Equal(b[1:5], []byte{'a', 'p', 'o', 's'}) {
+			return '\'', 6 // &apos;
+		}
+	}
+	return 0, 0
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/css/README.md b/vendor/github.com/tdewolff/parse/v2/css/README.md
new file mode 100644
index 0000000..02797a7
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/css/README.md
@@ -0,0 +1,170 @@
+# CSS [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/parse/v2/css?tab=doc)
+
+This package is a CSS3 lexer and parser written in [Go][1]. Both follow the specification at [CSS Syntax Module Level 3](http://www.w3.org/TR/css-syntax-3/). The lexer takes an io.Reader and converts it into tokens until the EOF. The parser returns a parse tree of the full io.Reader input stream, but the low-level `Next` function can be used for stream parsing to return grammar units until the EOF.
+
+## Installation
+Run the following command
+
+ go get -u github.com/tdewolff/parse/v2/css
+
+or add the following import and run project with `go get`
+
+ import "github.com/tdewolff/parse/v2/css"
+
+## Lexer
+### Usage
+The following initializes a new Lexer with io.Reader `r`:
+``` go
+l := css.NewLexer(parse.NewInput(r))
+```
+
+To tokenize until EOF or an error, use:
+``` go
+for {
+ tt, text := l.Next()
+ switch tt {
+ case css.ErrorToken:
+ // error or EOF set in l.Err()
+ return
+ // ...
+ }
+}
+```
+
+All tokens (see [CSS Syntax Module Level 3](http://www.w3.org/TR/css3-syntax/)):
+``` go
+ErrorToken // non-official token, returned when errors occur
+IdentToken
+FunctionToken // rgb( rgba( ...
+AtKeywordToken // @abc
+HashToken // #abc
+StringToken
+BadStringToken
+URLToken // url(
+BadURLToken
+DelimToken // any unmatched character
+NumberToken // 5
+PercentageToken // 5%
+DimensionToken // 5em
+UnicodeRangeToken
+IncludeMatchToken // ~=
+DashMatchToken // |=
+PrefixMatchToken // ^=
+SuffixMatchToken // $=
+SubstringMatchToken // *=
+ColumnToken // ||
+WhitespaceToken
+CDOToken // <!--
+CDCToken // -->
+ColonToken
+SemicolonToken
+CommaToken
+BracketToken // ( ) [ ] { }, all bracket tokens use this, Data() can distinguish between the brackets
+CommentToken // non-official token
+```
+
+### Examples
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/tdewolff/parse/v2/css"
+)
+
+// Tokenize CSS3 from stdin.
+func main() {
+ l := css.NewLexer(parse.NewInput(os.Stdin))
+ for {
+ tt, text := l.Next()
+ switch tt {
+ case css.ErrorToken:
+ if l.Err() != io.EOF {
+ fmt.Println("Error on line", l.Line(), ":", l.Err())
+ }
+ return
+ case css.IdentToken:
+ fmt.Println("Identifier", string(text))
+ case css.NumberToken:
+ fmt.Println("Number", string(text))
+ // ...
+ }
+ }
+}
+```
+
+## Parser
+### Usage
+The following creates a new Parser.
+``` go
+// true because this is the content of an inline style attribute
+p := css.NewParser(parse.NewInput(bytes.NewBufferString("color: red;")), true)
+```
+
+To iterate over the stylesheet, use:
+``` go
+for {
+ gt, _, data := p.Next()
+ if gt == css.ErrorGrammar {
+ break
+ }
+ // ...
+}
+```
+
+All grammar units returned by `Next`:
+``` go
+ErrorGrammar
+AtRuleGrammar
+EndAtRuleGrammar
+RulesetGrammar
+EndRulesetGrammar
+DeclarationGrammar
+TokenGrammar
+```
+
+### Examples
+``` go
+package main
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/tdewolff/parse/v2/css"
+)
+
+func main() {
+ // true because this is the content of an inline style attribute
+ p := css.NewParser(parse.NewInput(bytes.NewBufferString("color: red;")), true)
+ out := ""
+ for {
+ gt, _, data := p.Next()
+ if gt == css.ErrorGrammar {
+ break
+ } else if gt == css.AtRuleGrammar || gt == css.BeginAtRuleGrammar || gt == css.BeginRulesetGrammar || gt == css.DeclarationGrammar {
+ out += string(data)
+ if gt == css.DeclarationGrammar {
+ out += ":"
+ }
+ for _, val := range p.Values() {
+ out += string(val.Data)
+ }
+ if gt == css.BeginAtRuleGrammar || gt == css.BeginRulesetGrammar {
+ out += "{"
+ } else if gt == css.AtRuleGrammar || gt == css.DeclarationGrammar {
+ out += ";"
+ }
+ } else {
+ out += string(data)
+ }
+ }
+ fmt.Println(out)
+}
+```
+
+## License
+Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).
+
+[1]: http://golang.org/ "Go Language"
diff --git a/vendor/github.com/tdewolff/parse/v2/css/hash.go b/vendor/github.com/tdewolff/parse/v2/css/hash.go
new file mode 100644
index 0000000..25d2f7c
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/css/hash.go
@@ -0,0 +1,75 @@
+package css
+
+// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
+
+// uses github.com/tdewolff/hasher
+//go:generate hasher -type=Hash -file=hash.go
+
+// Hash defines perfect hashes for a predefined list of strings
+type Hash uint32
+
+// Unique hash definitions to be used instead of strings
+const (
+ Document Hash = 0x8 // document
+ Font_Face Hash = 0x809 // font-face
+ Keyframes Hash = 0x1109 // keyframes
+ Media Hash = 0x2105 // media
+ Page Hash = 0x2604 // page
+ Supports Hash = 0x1908 // supports
+)
+
+// String returns the hash's name.
+func (i Hash) String() string {
+ start := uint32(i >> 8)
+ n := uint32(i & 0xff)
+ if start+n > uint32(len(_Hash_text)) {
+ return ""
+ }
+ return _Hash_text[start : start+n]
+}
+
+// ToHash returns the hash whose name is s. It returns zero if there is no
+// such hash. It is case sensitive.
+func ToHash(s []byte) Hash {
+ if len(s) == 0 || len(s) > _Hash_maxLen {
+ return 0
+ }
+ h := uint32(_Hash_hash0)
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+ t := _Hash_text[i>>8 : i>>8+i&0xff]
+ for i := 0; i < len(s); i++ {
+ if t[i] != s[i] {
+ goto NEXT
+ }
+ }
+ return i
+ }
+NEXT:
+ if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+ t := _Hash_text[i>>8 : i>>8+i&0xff]
+ for i := 0; i < len(s); i++ {
+ if t[i] != s[i] {
+ return 0
+ }
+ }
+ return i
+ }
+ return 0
+}
+
+const _Hash_hash0 = 0x9acb0442
+const _Hash_maxLen = 9
+const _Hash_text = "documentfont-facekeyframesupportsmediapage"
+
+var _Hash_table = [1 << 3]Hash{
+ 0x1: 0x2604, // page
+ 0x2: 0x2105, // media
+ 0x3: 0x809, // font-face
+ 0x5: 0x1109, // keyframes
+ 0x6: 0x1908, // supports
+ 0x7: 0x8, // document
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/css/lex.go b/vendor/github.com/tdewolff/parse/v2/css/lex.go
new file mode 100644
index 0000000..3d1ff7e
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/css/lex.go
@@ -0,0 +1,698 @@
+// Package css is a CSS3 lexer and parser following the specifications at http://www.w3.org/TR/css-syntax-3/.
+package css
+
+// TODO: \uFFFD replacement character for NULL bytes in strings for example, or at least don't end the string early
+
+import (
+ "bytes"
+ "io"
+ "strconv"
+
+ "github.com/tdewolff/parse/v2"
+)
+
+// TokenType determines the type of token, eg. a number or a semicolon.
+type TokenType uint32
+
+// TokenType values.
+const (
+ ErrorToken TokenType = iota // extra token when errors occur
+ IdentToken
+ FunctionToken // rgb( rgba( ...
+ AtKeywordToken // @abc
+ HashToken // #abc
+ StringToken
+ BadStringToken
+ URLToken
+ BadURLToken
+ DelimToken // any unmatched character
+ NumberToken // 5
+ PercentageToken // 5%
+ DimensionToken // 5em
+ UnicodeRangeToken // U+554A
+ IncludeMatchToken // ~=
+ DashMatchToken // |=
+ PrefixMatchToken // ^=
+ SuffixMatchToken // $=
+ SubstringMatchToken // *=
+ ColumnToken // ||
+ WhitespaceToken // space \t \r \n \f
+ CDOToken // <!--
+ CDCToken // -->
+ ColonToken // :
+ SemicolonToken // ;
+ CommaToken // ,
+ LeftBracketToken // [
+ RightBracketToken // ]
+ LeftParenthesisToken // (
+ RightParenthesisToken // )
+ LeftBraceToken // {
+ RightBraceToken // }
+ CommentToken // extra token for comments
+ EmptyToken
+ CustomPropertyNameToken
+ CustomPropertyValueToken
+)
+
+// String returns the string representation of a TokenType.
+func (tt TokenType) String() string {
+ switch tt {
+ case ErrorToken:
+ return "Error"
+ case IdentToken:
+ return "Ident"
+ case FunctionToken:
+ return "Function"
+ case AtKeywordToken:
+ return "AtKeyword"
+ case HashToken:
+ return "Hash"
+ case StringToken:
+ return "String"
+ case BadStringToken:
+ return "BadString"
+ case URLToken:
+ return "URL"
+ case BadURLToken:
+ return "BadURL"
+ case DelimToken:
+ return "Delim"
+ case NumberToken:
+ return "Number"
+ case PercentageToken:
+ return "Percentage"
+ case DimensionToken:
+ return "Dimension"
+ case UnicodeRangeToken:
+ return "UnicodeRange"
+ case IncludeMatchToken:
+ return "IncludeMatch"
+ case DashMatchToken:
+ return "DashMatch"
+ case PrefixMatchToken:
+ return "PrefixMatch"
+ case SuffixMatchToken:
+ return "SuffixMatch"
+ case SubstringMatchToken:
+ return "SubstringMatch"
+ case ColumnToken:
+ return "Column"
+ case WhitespaceToken:
+ return "Whitespace"
+ case CDOToken:
+ return "CDO"
+ case CDCToken:
+ return "CDC"
+ case ColonToken:
+ return "Colon"
+ case SemicolonToken:
+ return "Semicolon"
+ case CommaToken:
+ return "Comma"
+ case LeftBracketToken:
+ return "LeftBracket"
+ case RightBracketToken:
+ return "RightBracket"
+ case LeftParenthesisToken:
+ return "LeftParenthesis"
+ case RightParenthesisToken:
+ return "RightParenthesis"
+ case LeftBraceToken:
+ return "LeftBrace"
+ case RightBraceToken:
+ return "RightBrace"
+ case CommentToken:
+ return "Comment"
+ case EmptyToken:
+ return "Empty"
+ case CustomPropertyNameToken:
+ return "CustomPropertyName"
+ case CustomPropertyValueToken:
+ return "CustomPropertyValue"
+ }
+ return "Invalid(" + strconv.Itoa(int(tt)) + ")"
+}
+
+////////////////////////////////////////////////////////////////
+
+// Lexer is the state for the lexer.
+type Lexer struct {
+ r *parse.Input
+}
+
+// NewLexer returns a new Lexer for a given io.Reader.
+func NewLexer(r *parse.Input) *Lexer {
+ return &Lexer{
+ r: r,
+ }
+}
+
+// Err returns the error encountered during lexing, this is often io.EOF but also other errors can be returned.
+func (l *Lexer) Err() error {
+ return l.r.Err()
+}
+
+// Next returns the next Token. It returns ErrorToken when an error was encountered. Using Err() one can retrieve the error message.
+func (l *Lexer) Next() (TokenType, []byte) {
+ switch l.r.Peek(0) {
+ case ' ', '\t', '\n', '\r', '\f':
+ l.r.Move(1)
+ for l.consumeWhitespace() {
+ }
+ return WhitespaceToken, l.r.Shift()
+ case ':':
+ l.r.Move(1)
+ return ColonToken, l.r.Shift()
+ case ';':
+ l.r.Move(1)
+ return SemicolonToken, l.r.Shift()
+ case ',':
+ l.r.Move(1)
+ return CommaToken, l.r.Shift()
+ case '(', ')', '[', ']', '{', '}':
+ if t := l.consumeBracket(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ case '#':
+ if l.consumeHashToken() {
+ return HashToken, l.r.Shift()
+ }
+ case '"', '\'':
+ if t := l.consumeString(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ case '.', '+':
+ if t := l.consumeNumeric(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ case '-':
+ if t := l.consumeNumeric(); t != ErrorToken {
+ return t, l.r.Shift()
+ } else if t := l.consumeIdentlike(); t != ErrorToken {
+ return t, l.r.Shift()
+ } else if l.consumeCDCToken() {
+ return CDCToken, l.r.Shift()
+ } else if l.consumeCustomVariableToken() {
+ return CustomPropertyNameToken, l.r.Shift()
+ }
+ case '@':
+ if l.consumeAtKeywordToken() {
+ return AtKeywordToken, l.r.Shift()
+ }
+ case '$', '*', '^', '~':
+ if t := l.consumeMatch(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ case '/':
+ if l.consumeComment() {
+ return CommentToken, l.r.Shift()
+ }
+ case '<':
+ if l.consumeCDOToken() {
+ return CDOToken, l.r.Shift()
+ }
+ case '\\':
+ if t := l.consumeIdentlike(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ case 'u', 'U':
+ if l.consumeUnicodeRangeToken() {
+ return UnicodeRangeToken, l.r.Shift()
+ } else if t := l.consumeIdentlike(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ case '|':
+ if t := l.consumeMatch(); t != ErrorToken {
+ return t, l.r.Shift()
+ } else if l.consumeColumnToken() {
+ return ColumnToken, l.r.Shift()
+ }
+ case 0:
+ if l.r.Err() != nil {
+ return ErrorToken, nil
+ }
+ default:
+ if t := l.consumeNumeric(); t != ErrorToken {
+ return t, l.r.Shift()
+ } else if t := l.consumeIdentlike(); t != ErrorToken {
+ return t, l.r.Shift()
+ }
+ }
+ // can't be rune because consumeIdentlike consumes that as an identifier
+ l.r.Move(1)
+ return DelimToken, l.r.Shift()
+}
+
+////////////////////////////////////////////////////////////////
+
+/*
+The following functions follow the railroad diagrams in http://www.w3.org/TR/css3-syntax/
+*/
+
+func (l *Lexer) consumeByte(c byte) bool {
+ if l.r.Peek(0) == c {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeComment() bool {
+ if l.r.Peek(0) != '/' || l.r.Peek(1) != '*' {
+ return false
+ }
+ l.r.Move(2)
+ for {
+ c := l.r.Peek(0)
+ if c == 0 && l.r.Err() != nil {
+ break
+ } else if c == '*' && l.r.Peek(1) == '/' {
+ l.r.Move(2)
+ return true
+ }
+ l.r.Move(1)
+ }
+ return true
+}
+
+func (l *Lexer) consumeNewline() bool {
+ c := l.r.Peek(0)
+ if c == '\n' || c == '\f' {
+ l.r.Move(1)
+ return true
+ } else if c == '\r' {
+ if l.r.Peek(1) == '\n' {
+ l.r.Move(2)
+ } else {
+ l.r.Move(1)
+ }
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeWhitespace() bool {
+ c := l.r.Peek(0)
+ if c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeDigit() bool {
+ c := l.r.Peek(0)
+ if c >= '0' && c <= '9' {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeHexDigit() bool {
+ c := l.r.Peek(0)
+ if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeEscape() bool {
+ if l.r.Peek(0) != '\\' {
+ return false
+ }
+ mark := l.r.Pos()
+ l.r.Move(1)
+ if l.consumeNewline() {
+ l.r.Rewind(mark)
+ return false
+ } else if l.consumeHexDigit() {
+ for k := 1; k < 6; k++ {
+ if !l.consumeHexDigit() {
+ break
+ }
+ }
+ l.consumeWhitespace()
+ return true
+ } else {
+ c := l.r.Peek(0)
+ if c >= 0xC0 {
+ _, n := l.r.PeekRune(0)
+ l.r.Move(n)
+ return true
+ } else if c == 0 && l.r.Err() != nil {
+ l.r.Rewind(mark)
+ return false
+ }
+ }
+ l.r.Move(1)
+ return true
+}
+
+func (l *Lexer) consumeIdentToken() bool {
+ mark := l.r.Pos()
+ if l.r.Peek(0) == '-' {
+ l.r.Move(1)
+ }
+ c := l.r.Peek(0)
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c >= 0x80) {
+ if c != '\\' || !l.consumeEscape() {
+ l.r.Rewind(mark)
+ return false
+ }
+ } else {
+ l.r.Move(1)
+ }
+ for {
+ c := l.r.Peek(0)
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
+ if c != '\\' || !l.consumeEscape() {
+ break
+ }
+ } else {
+ l.r.Move(1)
+ }
+ }
+ return true
+}
+
+// support custom variables, https://www.w3.org/TR/css-variables-1/
+func (l *Lexer) consumeCustomVariableToken() bool {
+ // expect to be on a '-'
+ l.r.Move(1)
+ if l.r.Peek(0) != '-' {
+ l.r.Move(-1)
+ return false
+ }
+ if !l.consumeIdentToken() {
+ l.r.Move(-1)
+ return false
+ }
+ return true
+}
+
+func (l *Lexer) consumeAtKeywordToken() bool {
+ // expect to be on an '@'
+ l.r.Move(1)
+ if !l.consumeIdentToken() {
+ l.r.Move(-1)
+ return false
+ }
+ return true
+}
+
+func (l *Lexer) consumeHashToken() bool {
+ // expect to be on a '#'
+ mark := l.r.Pos()
+ l.r.Move(1)
+ c := l.r.Peek(0)
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
+ if c != '\\' || !l.consumeEscape() {
+ l.r.Rewind(mark)
+ return false
+ }
+ } else {
+ l.r.Move(1)
+ }
+ for {
+ c := l.r.Peek(0)
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
+ if c != '\\' || !l.consumeEscape() {
+ break
+ }
+ } else {
+ l.r.Move(1)
+ }
+ }
+ return true
+}
+
+func (l *Lexer) consumeNumberToken() bool {
+ mark := l.r.Pos()
+ c := l.r.Peek(0)
+ if c == '+' || c == '-' {
+ l.r.Move(1)
+ }
+ firstDigit := l.consumeDigit()
+ if firstDigit {
+ for l.consumeDigit() {
+ }
+ }
+ if l.r.Peek(0) == '.' {
+ l.r.Move(1)
+ if l.consumeDigit() {
+ for l.consumeDigit() {
+ }
+ } else if firstDigit {
+ // . could belong to the next token
+ l.r.Move(-1)
+ return true
+ } else {
+ l.r.Rewind(mark)
+ return false
+ }
+ } else if !firstDigit {
+ l.r.Rewind(mark)
+ return false
+ }
+ mark = l.r.Pos()
+ c = l.r.Peek(0)
+ if c == 'e' || c == 'E' {
+ l.r.Move(1)
+ c = l.r.Peek(0)
+ if c == '+' || c == '-' {
+ l.r.Move(1)
+ }
+ if !l.consumeDigit() {
+ // e could belong to next token
+ l.r.Rewind(mark)
+ return true
+ }
+ for l.consumeDigit() {
+ }
+ }
+ return true
+}
+
+func (l *Lexer) consumeUnicodeRangeToken() bool {
+ c := l.r.Peek(0)
+ if (c != 'u' && c != 'U') || l.r.Peek(1) != '+' {
+ return false
+ }
+ mark := l.r.Pos()
+ l.r.Move(2)
+
+ // consume up to 6 hexDigits
+ k := 0
+ for l.consumeHexDigit() {
+ k++
+ }
+
+ // either a minus or a question mark or the end is expected
+ if l.consumeByte('-') {
+ if k == 0 || 6 < k {
+ l.r.Rewind(mark)
+ return false
+ }
+
+ // consume another up to 6 hexDigits
+ if l.consumeHexDigit() {
+ k = 1
+ for l.consumeHexDigit() {
+ k++
+ }
+ } else {
+ l.r.Rewind(mark)
+ return false
+ }
+ } else if l.consumeByte('?') {
+ // could be filled up to 6 characters with question marks or else regular hexDigits
+ k++
+ for l.consumeByte('?') {
+ k++
+ }
+ }
+ if k == 0 || 6 < k {
+ l.r.Rewind(mark)
+ return false
+ }
+ return true
+}
+
+func (l *Lexer) consumeColumnToken() bool {
+ if l.r.Peek(0) == '|' && l.r.Peek(1) == '|' {
+ l.r.Move(2)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeCDOToken() bool {
+ if l.r.Peek(0) == '<' && l.r.Peek(1) == '!' && l.r.Peek(2) == '-' && l.r.Peek(3) == '-' {
+ l.r.Move(4)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeCDCToken() bool {
+ if l.r.Peek(0) == '-' && l.r.Peek(1) == '-' && l.r.Peek(2) == '>' {
+ l.r.Move(3)
+ return true
+ }
+ return false
+}
+
+////////////////////////////////////////////////////////////////
+
+// consumeMatch consumes any MatchToken.
+func (l *Lexer) consumeMatch() TokenType {
+ if l.r.Peek(1) == '=' {
+ switch l.r.Peek(0) {
+ case '~':
+ l.r.Move(2)
+ return IncludeMatchToken
+ case '|':
+ l.r.Move(2)
+ return DashMatchToken
+ case '^':
+ l.r.Move(2)
+ return PrefixMatchToken
+ case '$':
+ l.r.Move(2)
+ return SuffixMatchToken
+ case '*':
+ l.r.Move(2)
+ return SubstringMatchToken
+ }
+ }
+ return ErrorToken
+}
+
+// consumeBracket consumes any bracket token.
+func (l *Lexer) consumeBracket() TokenType {
+ switch l.r.Peek(0) {
+ case '(':
+ l.r.Move(1)
+ return LeftParenthesisToken
+ case ')':
+ l.r.Move(1)
+ return RightParenthesisToken
+ case '[':
+ l.r.Move(1)
+ return LeftBracketToken
+ case ']':
+ l.r.Move(1)
+ return RightBracketToken
+ case '{':
+ l.r.Move(1)
+ return LeftBraceToken
+ case '}':
+ l.r.Move(1)
+ return RightBraceToken
+ }
+ return ErrorToken
+}
+
+// consumeNumeric consumes NumberToken, PercentageToken or DimensionToken.
+func (l *Lexer) consumeNumeric() TokenType {
+ if l.consumeNumberToken() {
+ if l.consumeByte('%') {
+ return PercentageToken
+ } else if l.consumeIdentToken() {
+ return DimensionToken
+ }
+ return NumberToken
+ }
+ return ErrorToken
+}
+
+// consumeString consumes a string and may return BadStringToken when a newline is encountered.
+func (l *Lexer) consumeString() TokenType {
+ // assume to be on " or '
+ delim := l.r.Peek(0)
+ l.r.Move(1)
+ for {
+ c := l.r.Peek(0)
+ if c == 0 && l.r.Err() != nil {
+ break
+ } else if c == '\n' || c == '\r' || c == '\f' {
+ l.r.Move(1)
+ return BadStringToken
+ } else if c == delim {
+ l.r.Move(1)
+ break
+ } else if c == '\\' {
+ if !l.consumeEscape() {
+ // either newline or EOF after backslash
+ l.r.Move(1)
+ l.consumeNewline()
+ }
+ } else {
+ l.r.Move(1)
+ }
+ }
+ return StringToken
+}
+
+func (l *Lexer) consumeUnquotedURL() bool {
+ for {
+ c := l.r.Peek(0)
+ if c == 0 && l.r.Err() != nil || c == ')' {
+ break
+ } else if c == '"' || c == '\'' || c == '(' || c == '\\' || c == ' ' || c <= 0x1F || c == 0x7F {
+ if c != '\\' || !l.consumeEscape() {
+ return false
+ }
+ } else {
+ l.r.Move(1)
+ }
+ }
+ return true
+}
+
+// consumeRemnantsBadURL consumes bytes of a BadURLToken so that normal tokenization may continue.
+func (l *Lexer) consumeRemnantsBadURL() {
+ for {
+ if l.consumeByte(')') || l.r.Err() != nil {
+ break
+ } else if !l.consumeEscape() {
+ l.r.Move(1)
+ }
+ }
+}
+
+// consumeIdentlike consumes IdentToken, FunctionToken or URLToken.
+func (l *Lexer) consumeIdentlike() TokenType {
+ if l.consumeIdentToken() {
+ if l.r.Peek(0) != '(' {
+ return IdentToken
+ } else if !parse.EqualFold(bytes.Replace(l.r.Lexeme(), []byte{'\\'}, nil, -1), []byte{'u', 'r', 'l'}) {
+ l.r.Move(1)
+ return FunctionToken
+ }
+ l.r.Move(1)
+
+ // consume url
+ for l.consumeWhitespace() {
+ }
+ if c := l.r.Peek(0); c == '"' || c == '\'' {
+ if l.consumeString() == BadStringToken {
+ l.consumeRemnantsBadURL()
+ return BadURLToken
+ }
+ } else if !l.consumeUnquotedURL() && !l.consumeWhitespace() { // if unquoted URL fails due to encountering whitespace, continue
+ l.consumeRemnantsBadURL()
+ return BadURLToken
+ }
+ for l.consumeWhitespace() {
+ }
+ if !l.consumeByte(')') && l.r.Err() != io.EOF {
+ l.consumeRemnantsBadURL()
+ return BadURLToken
+ }
+ return URLToken
+ }
+ return ErrorToken
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/css/parse.go b/vendor/github.com/tdewolff/parse/v2/css/parse.go
new file mode 100644
index 0000000..8daa40a
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/css/parse.go
@@ -0,0 +1,462 @@
+package css
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/buffer"
+)
+
+var wsBytes = []byte(" ")
+var endBytes = []byte("}")
+var emptyBytes = []byte("")
+
+// GrammarType determines the type of grammar.
+type GrammarType uint32
+
+// GrammarType values.
+const (
+ ErrorGrammar GrammarType = iota // extra token when errors occur
+ CommentGrammar
+ AtRuleGrammar
+ BeginAtRuleGrammar
+ EndAtRuleGrammar
+ QualifiedRuleGrammar
+ BeginRulesetGrammar
+ EndRulesetGrammar
+ DeclarationGrammar
+ TokenGrammar
+ CustomPropertyGrammar
+)
+
+// String returns the string representation of a GrammarType.
+func (tt GrammarType) String() string {
+ switch tt {
+ case ErrorGrammar:
+ return "Error"
+ case CommentGrammar:
+ return "Comment"
+ case AtRuleGrammar:
+ return "AtRule"
+ case BeginAtRuleGrammar:
+ return "BeginAtRule"
+ case EndAtRuleGrammar:
+ return "EndAtRule"
+ case QualifiedRuleGrammar:
+ return "QualifiedRule"
+ case BeginRulesetGrammar:
+ return "BeginRuleset"
+ case EndRulesetGrammar:
+ return "EndRuleset"
+ case DeclarationGrammar:
+ return "Declaration"
+ case TokenGrammar:
+ return "Token"
+ case CustomPropertyGrammar:
+ return "CustomProperty"
+ }
+ return "Invalid(" + strconv.Itoa(int(tt)) + ")"
+}
+
+////////////////////////////////////////////////////////////////
+
+// State is the state function the parser currently is in.
+type State func(*Parser) GrammarType
+
+// Token is a single TokenType and its associated data.
+type Token struct {
+ TokenType
+ Data []byte
+}
+
+func (t Token) String() string {
+ return t.TokenType.String() + "('" + string(t.Data) + "')"
+}
+
+// Parser is the state for the parser.
+type Parser struct {
+ l *Lexer
+ state []State
+ err string
+ errPos int
+
+ buf []Token
+ level int
+
+ data []byte
+ tt TokenType
+ keepWS bool
+ prevWS bool
+ prevEnd bool
+ prevComment bool
+}
+
+// NewParser returns a new CSS parser from an io.Reader. isInline specifies whether this is an inline style attribute.
+func NewParser(r *parse.Input, isInline bool) *Parser {
+ l := NewLexer(r)
+ p := &Parser{
+ l: l,
+ state: make([]State, 0, 4),
+ }
+
+ if isInline {
+ p.state = append(p.state, (*Parser).parseDeclarationList)
+ } else {
+ p.state = append(p.state, (*Parser).parseStylesheet)
+ }
+ return p
+}
+
+// HasParseError returns true if there is a parse error (and not a read error).
+func (p *Parser) HasParseError() bool {
+ return p.err != ""
+}
+
+// Err returns the error encountered during parsing, this is often io.EOF but also other errors can be returned.
+func (p *Parser) Err() error {
+ if p.err != "" {
+ r := buffer.NewReader(p.l.r.Bytes())
+ return parse.NewError(r, p.errPos, p.err)
+ }
+ return p.l.Err()
+}
+
+// Next returns the next Grammar. It returns ErrorGrammar when an error was encountered. Using Err() one can retrieve the error message.
+func (p *Parser) Next() (GrammarType, TokenType, []byte) {
+ p.err = ""
+
+ if p.prevEnd {
+ p.tt, p.data = RightBraceToken, endBytes
+ p.prevEnd = false
+ } else {
+ p.tt, p.data = p.popToken(true)
+ }
+ gt := p.state[len(p.state)-1](p)
+ return gt, p.tt, p.data
+}
+
+// Offset returns the offset for the current Grammar.
+func (p *Parser) Offset() int {
+ return p.l.r.Offset()
+}
+
+// Values returns a slice of Tokens for the last Grammar. Only AtRuleGrammar, BeginAtRuleGrammar, BeginRulesetGrammar and DeclarationGrammar will return the at-rule components, ruleset selector and declaration values respectively.
+func (p *Parser) Values() []Token {
+ return p.buf
+}
+
+func (p *Parser) popToken(allowComment bool) (TokenType, []byte) {
+ p.prevWS = false
+ p.prevComment = false
+ tt, data := p.l.Next()
+ for !p.keepWS && tt == WhitespaceToken || tt == CommentToken {
+ if tt == WhitespaceToken {
+ p.prevWS = true
+ } else {
+ p.prevComment = true
+ if allowComment && len(p.state) == 1 {
+ break
+ }
+ }
+ tt, data = p.l.Next()
+ }
+ return tt, data
+}
+
+func (p *Parser) initBuf() {
+ p.buf = p.buf[:0]
+}
+
+func (p *Parser) pushBuf(tt TokenType, data []byte) {
+ p.buf = append(p.buf, Token{tt, data})
+}
+
+////////////////////////////////////////////////////////////////
+
+func (p *Parser) parseStylesheet() GrammarType {
+ if p.tt == CDOToken || p.tt == CDCToken {
+ return TokenGrammar
+ } else if p.tt == AtKeywordToken {
+ return p.parseAtRule()
+ } else if p.tt == CommentToken {
+ return CommentGrammar
+ } else if p.tt == ErrorToken {
+ return ErrorGrammar
+ }
+ return p.parseQualifiedRule()
+}
+
+func (p *Parser) parseDeclarationList() GrammarType {
+ if p.tt == CommentToken {
+ p.tt, p.data = p.popToken(false)
+ }
+ for p.tt == SemicolonToken {
+ p.tt, p.data = p.popToken(false)
+ }
+
+ // IE hack: *color:red;
+ if p.tt == DelimToken && p.data[0] == '*' {
+ tt, data := p.popToken(false)
+ p.tt = tt
+ p.data = append(p.data, data...)
+ }
+
+ if p.tt == ErrorToken {
+ return ErrorGrammar
+ } else if p.tt == AtKeywordToken {
+ return p.parseAtRule()
+ } else if p.tt == IdentToken || p.tt == DelimToken {
+ return p.parseDeclaration()
+ } else if p.tt == CustomPropertyNameToken {
+ return p.parseCustomProperty()
+ }
+
+ // parse error
+ p.initBuf()
+ p.l.r.Move(-len(p.data))
+ p.err, p.errPos = fmt.Sprintf("CSS parse error: unexpected token '%s' in declaration", string(p.data)), p.l.r.Offset()
+ p.l.r.Move(len(p.data))
+
+ if p.tt == RightBraceToken {
+ // right brace token will occur when we've had a decl error that ended in a right brace token
+	// as these are not handled by decl error, we handle it here explicitly. Normally it's used to end e.g. the qualified rule.
+ p.pushBuf(p.tt, p.data)
+ return ErrorGrammar
+ }
+ return p.parseDeclarationError(p.tt, p.data)
+}
+
+////////////////////////////////////////////////////////////////
+
+func (p *Parser) parseAtRule() GrammarType {
+ p.initBuf()
+ p.data = parse.ToLower(parse.Copy(p.data))
+ atRuleName := p.data
+ if len(atRuleName) > 0 && atRuleName[1] == '-' {
+ if i := bytes.IndexByte(atRuleName[2:], '-'); i != -1 {
+ atRuleName = atRuleName[i+2:] // skip vendor specific prefix
+ }
+ }
+ atRule := ToHash(atRuleName[1:])
+
+ first := true
+ skipWS := false
+ for {
+ tt, data := p.popToken(false)
+ if tt == LeftBraceToken && p.level == 0 {
+ if atRule == Font_Face || atRule == Page {
+ p.state = append(p.state, (*Parser).parseAtRuleDeclarationList)
+ } else if atRule == Document || atRule == Keyframes || atRule == Media || atRule == Supports {
+ p.state = append(p.state, (*Parser).parseAtRuleRuleList)
+ } else {
+ p.state = append(p.state, (*Parser).parseAtRuleUnknown)
+ }
+ return BeginAtRuleGrammar
+ } else if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
+ p.prevEnd = (tt == RightBraceToken)
+ return AtRuleGrammar
+ } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
+ p.level++
+ } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
+ p.level--
+ }
+ if first {
+ if tt == LeftParenthesisToken || tt == LeftBracketToken {
+ p.prevWS = false
+ }
+ first = false
+ }
+ if len(data) == 1 && (data[0] == ',' || data[0] == ':') {
+ skipWS = true
+ } else if p.prevWS && !skipWS && tt != RightParenthesisToken {
+ p.pushBuf(WhitespaceToken, wsBytes)
+ } else {
+ skipWS = false
+ }
+ if tt == LeftParenthesisToken {
+ skipWS = true
+ }
+ p.pushBuf(tt, data)
+ }
+}
+
+func (p *Parser) parseAtRuleRuleList() GrammarType {
+ if p.tt == RightBraceToken || p.tt == ErrorToken {
+ p.state = p.state[:len(p.state)-1]
+ return EndAtRuleGrammar
+ } else if p.tt == AtKeywordToken {
+ return p.parseAtRule()
+ } else {
+ return p.parseQualifiedRule()
+ }
+}
+
+func (p *Parser) parseAtRuleDeclarationList() GrammarType {
+ for p.tt == SemicolonToken {
+ p.tt, p.data = p.popToken(false)
+ }
+ if p.tt == RightBraceToken || p.tt == ErrorToken {
+ p.state = p.state[:len(p.state)-1]
+ return EndAtRuleGrammar
+ }
+ return p.parseDeclarationList()
+}
+
+func (p *Parser) parseAtRuleUnknown() GrammarType {
+ p.keepWS = true
+ if p.tt == RightBraceToken && p.level == 0 || p.tt == ErrorToken {
+ p.state = p.state[:len(p.state)-1]
+ p.keepWS = false
+ return EndAtRuleGrammar
+ }
+ if p.tt == LeftParenthesisToken || p.tt == LeftBraceToken || p.tt == LeftBracketToken || p.tt == FunctionToken {
+ p.level++
+ } else if p.tt == RightParenthesisToken || p.tt == RightBraceToken || p.tt == RightBracketToken {
+ p.level--
+ }
+ return TokenGrammar
+}
+
+func (p *Parser) parseQualifiedRule() GrammarType {
+ p.initBuf()
+ first := true
+ inAttrSel := false
+ skipWS := true
+ var tt TokenType
+ var data []byte
+ for {
+ if first {
+ tt, data = p.tt, p.data
+ p.tt = WhitespaceToken
+ p.data = emptyBytes
+ first = false
+ } else {
+ tt, data = p.popToken(false)
+ }
+ if tt == LeftBraceToken && p.level == 0 {
+ p.state = append(p.state, (*Parser).parseQualifiedRuleDeclarationList)
+ return BeginRulesetGrammar
+ } else if tt == ErrorToken {
+ p.err, p.errPos = "CSS parse error: unexpected ending in qualified rule", p.l.r.Offset()
+ return ErrorGrammar
+ } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
+ p.level++
+ } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
+ p.level--
+ }
+ if len(data) == 1 && (data[0] == ',' || data[0] == '>' || data[0] == '+' || data[0] == '~') {
+ if data[0] == ',' {
+ return QualifiedRuleGrammar
+ }
+ skipWS = true
+ } else if p.prevWS && !skipWS && !inAttrSel {
+ p.pushBuf(WhitespaceToken, wsBytes)
+ } else {
+ skipWS = false
+ }
+ if tt == LeftBracketToken {
+ inAttrSel = true
+ } else if tt == RightBracketToken {
+ inAttrSel = false
+ }
+ p.pushBuf(tt, data)
+ }
+}
+
+func (p *Parser) parseQualifiedRuleDeclarationList() GrammarType {
+ for p.tt == SemicolonToken {
+ p.tt, p.data = p.popToken(false)
+ }
+ if p.tt == RightBraceToken || p.tt == ErrorToken {
+ p.state = p.state[:len(p.state)-1]
+ return EndRulesetGrammar
+ }
+ return p.parseDeclarationList()
+}
+
+func (p *Parser) parseDeclaration() GrammarType {
+ p.initBuf()
+ p.data = parse.ToLower(parse.Copy(p.data))
+
+ ttName, dataName := p.tt, p.data
+ tt, data := p.popToken(false)
+ if tt != ColonToken {
+ p.l.r.Move(-len(data))
+ p.err, p.errPos = "CSS parse error: expected colon in declaration", p.l.r.Offset()
+ p.l.r.Move(len(data))
+ p.pushBuf(ttName, dataName)
+ return p.parseDeclarationError(tt, data)
+ }
+
+ skipWS := true
+ for {
+ tt, data := p.popToken(false)
+ if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
+ p.prevEnd = (tt == RightBraceToken)
+ return DeclarationGrammar
+ } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
+ p.level++
+ } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
+ p.level--
+ }
+ if len(data) == 1 && (data[0] == ',' || data[0] == '/' || data[0] == ':' || data[0] == '!' || data[0] == '=') {
+ skipWS = true
+ } else if (p.prevWS || p.prevComment) && !skipWS {
+ p.pushBuf(WhitespaceToken, wsBytes)
+ } else {
+ skipWS = false
+ }
+ p.pushBuf(tt, data)
+ }
+}
+
+func (p *Parser) parseDeclarationError(tt TokenType, data []byte) GrammarType {
+ // we're on the offending (tt,data), keep popping tokens till we reach ;, }, or EOF
+ p.tt, p.data = tt, data
+ for {
+ if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
+ p.prevEnd = (tt == RightBraceToken)
+ if tt == SemicolonToken {
+ p.pushBuf(tt, data)
+ }
+ return ErrorGrammar
+ } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
+ p.level++
+ } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
+ p.level--
+ }
+
+ if p.prevWS {
+ p.pushBuf(WhitespaceToken, wsBytes)
+ }
+ p.pushBuf(tt, data)
+
+ tt, data = p.popToken(false)
+ }
+}
+
+func (p *Parser) parseCustomProperty() GrammarType {
+ p.initBuf()
+ if tt, data := p.popToken(false); tt != ColonToken {
+ p.l.r.Move(-len(data))
+ p.err, p.errPos = "CSS parse error: expected colon in custom property", p.l.r.Offset()
+ p.l.r.Move(len(data))
+ return ErrorGrammar
+ }
+ val := []byte{}
+ for {
+ tt, data := p.l.Next()
+ if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
+ p.prevEnd = (tt == RightBraceToken)
+ p.pushBuf(CustomPropertyValueToken, val)
+ return CustomPropertyGrammar
+ } else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
+ p.level++
+ } else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
+ p.level--
+ }
+ val = append(val, data...)
+ }
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/css/util.go b/vendor/github.com/tdewolff/parse/v2/css/util.go
new file mode 100644
index 0000000..20b99a7
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/css/util.go
@@ -0,0 +1,47 @@
+package css
+
+import "github.com/tdewolff/parse/v2"
+
+// IsIdent returns true if the bytes are a valid identifier.
+func IsIdent(b []byte) bool {
+ l := NewLexer(parse.NewInputBytes(b))
+ l.consumeIdentToken()
+ l.r.Restore()
+ return l.r.Pos() == len(b)
+}
+
+// IsURLUnquoted returns true if the bytes are a valid unquoted URL.
+func IsURLUnquoted(b []byte) bool {
+ l := NewLexer(parse.NewInputBytes(b))
+ l.consumeUnquotedURL()
+ l.r.Restore()
+ return l.r.Pos() == len(b)
+}
+
+// HSL2RGB converts HSL to RGB with all of range [0,1]
+// from http://www.w3.org/TR/css3-color/#hsl-color
+func HSL2RGB(h, s, l float64) (float64, float64, float64) {
+ m2 := l * (s + 1)
+ if l > 0.5 {
+ m2 = l + s - l*s
+ }
+ m1 := l*2 - m2
+ return hue2rgb(m1, m2, h+1.0/3.0), hue2rgb(m1, m2, h), hue2rgb(m1, m2, h-1.0/3.0)
+}
+
+func hue2rgb(m1, m2, h float64) float64 {
+ if h < 0.0 {
+ h += 1.0
+ }
+ if h > 1.0 {
+ h -= 1.0
+ }
+ if h*6.0 < 1.0 {
+ return m1 + (m2-m1)*h*6.0
+ } else if h*2.0 < 1.0 {
+ return m2
+ } else if h*3.0 < 2.0 {
+ return m1 + (m2-m1)*(2.0/3.0-h)*6.0
+ }
+ return m1
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/error.go b/vendor/github.com/tdewolff/parse/v2/error.go
new file mode 100644
index 0000000..f6657f7
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/error.go
@@ -0,0 +1,47 @@
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// Error is a parsing error returned by parser. It contains a message and an offset at which the error occurred.
+type Error struct {
+ Message string
+ Line int
+ Column int
+ Context string
+}
+
+// NewError creates a new error
+func NewError(r io.Reader, offset int, message string, a ...interface{}) *Error {
+ line, column, context := Position(r, offset)
+ if 0 < len(a) {
+ message = fmt.Sprintf(message, a...)
+ }
+ return &Error{
+ Message: message,
+ Line: line,
+ Column: column,
+ Context: context,
+ }
+}
+
+// NewErrorLexer creates a new error from an active Lexer.
+func NewErrorLexer(l *Input, message string, a ...interface{}) *Error {
+ r := bytes.NewBuffer(l.Bytes())
+ offset := l.Offset()
+ return NewError(r, offset, message, a...)
+}
+
+// Position returns the line, column, and context of the error.
+// Context is the entire line at which the error occurred.
+func (e *Error) Position() (int, int, string) {
+ return e.Line, e.Column, e.Context
+}
+
+// Error returns the error string, containing the context and line + column number.
+func (e *Error) Error() string {
+ return fmt.Sprintf("%s on line %d and column %d\n%s", e.Message, e.Line, e.Column, e.Context)
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/html/README.md b/vendor/github.com/tdewolff/parse/v2/html/README.md
new file mode 100644
index 0000000..53145db
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/html/README.md
@@ -0,0 +1,98 @@
+# HTML [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/parse/v2/html?tab=doc)
+
+This package is an HTML5 lexer written in [Go][1]. It follows the specification at [The HTML syntax](http://www.w3.org/TR/html5/syntax.html). The lexer takes an io.Reader and converts it into tokens until the EOF.
+
+## Installation
+Run the following command
+
+ go get -u github.com/tdewolff/parse/v2/html
+
+or add the following import and run project with `go get`
+
+ import "github.com/tdewolff/parse/v2/html"
+
+## Lexer
+### Usage
+The following initializes a new Lexer with io.Reader `r`:
+``` go
+l := html.NewLexer(parse.NewInput(r))
+```
+
+To tokenize until EOF or an error occurs, use:
+``` go
+for {
+ tt, data := l.Next()
+ switch tt {
+ case html.ErrorToken:
+ // error or EOF set in l.Err()
+ return
+ case html.StartTagToken:
+ // ...
+ for {
+ ttAttr, dataAttr := l.Next()
+ if ttAttr != html.AttributeToken {
+ break
+ }
+ // ...
+ }
+ // ...
+ }
+}
+```
+
+All tokens:
+``` go
+ErrorToken TokenType = iota // extra token when errors occur
+CommentToken
+DoctypeToken
+StartTagToken
+StartTagCloseToken
+StartTagVoidToken
+EndTagToken
+AttributeToken
+TextToken
+```
+
+### Examples
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/tdewolff/parse/v2/html"
+)
+
+// Tokenize HTML from stdin.
+func main() {
+ l := html.NewLexer(parse.NewInput(os.Stdin))
+ for {
+ tt, data := l.Next()
+ switch tt {
+ case html.ErrorToken:
+ if l.Err() != io.EOF {
+ fmt.Println("Error on line", l.Line(), ":", l.Err())
+ }
+ return
+ case html.StartTagToken:
+ fmt.Println("Tag", string(data))
+ for {
+ ttAttr, dataAttr := l.Next()
+ if ttAttr != html.AttributeToken {
+ break
+ }
+
+ key := dataAttr
+ val := l.AttrVal()
+ fmt.Println("Attribute", string(key), "=", string(val))
+ }
+ // ...
+ }
+ }
+}
+```
+
+## License
+Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).
+
+[1]: http://golang.org/ "Go Language"
diff --git a/vendor/github.com/tdewolff/parse/v2/html/hash.go b/vendor/github.com/tdewolff/parse/v2/html/hash.go
new file mode 100644
index 0000000..16432ad
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/html/hash.go
@@ -0,0 +1,81 @@
+package html
+
+// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
+
+// uses github.com/tdewolff/hasher
+//go:generate hasher -type=Hash -file=hash.go
+
+// Hash defines perfect hashes for a predefined list of strings
+type Hash uint32
+
+// Unique hash definitions to be used instead of strings
+const (
+ Iframe Hash = 0x6 // iframe
+ Math Hash = 0x604 // math
+ Plaintext Hash = 0x1e09 // plaintext
+ Script Hash = 0xa06 // script
+ Style Hash = 0x1405 // style
+ Svg Hash = 0x1903 // svg
+ Textarea Hash = 0x2308 // textarea
+ Title Hash = 0xf05 // title
+ Xmp Hash = 0x1c03 // xmp
+)
+
+// String returns the hash's name.
+func (i Hash) String() string {
+ start := uint32(i >> 8)
+ n := uint32(i & 0xff)
+ if start+n > uint32(len(_Hash_text)) {
+ return ""
+ }
+ return _Hash_text[start : start+n]
+}
+
+// ToHash returns the hash whose name is s. It returns zero if there is no
+// such hash. It is case sensitive.
+func ToHash(s []byte) Hash {
+ if len(s) == 0 || len(s) > _Hash_maxLen {
+ return 0
+ }
+ h := uint32(_Hash_hash0)
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+ t := _Hash_text[i>>8 : i>>8+i&0xff]
+ for i := 0; i < len(s); i++ {
+ if t[i] != s[i] {
+ goto NEXT
+ }
+ }
+ return i
+ }
+NEXT:
+ if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
+ t := _Hash_text[i>>8 : i>>8+i&0xff]
+ for i := 0; i < len(s); i++ {
+ if t[i] != s[i] {
+ return 0
+ }
+ }
+ return i
+ }
+ return 0
+}
+
+const _Hash_hash0 = 0x9acb0442
+const _Hash_maxLen = 9
+const _Hash_text = "iframemathscriptitlestylesvgxmplaintextarea"
+
+var _Hash_table = [1 << 4]Hash{
+ 0x0: 0x2308, // textarea
+ 0x2: 0x6, // iframe
+ 0x4: 0xf05, // title
+ 0x5: 0x1e09, // plaintext
+ 0x7: 0x1405, // style
+ 0x8: 0x604, // math
+ 0x9: 0xa06, // script
+ 0xa: 0x1903, // svg
+ 0xb: 0x1c03, // xmp
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/html/lex.go b/vendor/github.com/tdewolff/parse/v2/html/lex.go
new file mode 100644
index 0000000..4325024
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/html/lex.go
@@ -0,0 +1,494 @@
+// Package html is an HTML5 lexer following the specifications at http://www.w3.org/TR/html5/syntax.html.
+package html
+
+import (
+ "strconv"
+
+ "github.com/tdewolff/parse/v2"
+)
+
+// TokenType determines the type of token, eg. a number or a semicolon.
+type TokenType uint32
+
+// TokenType values.
+const (
+ ErrorToken TokenType = iota // extra token when errors occur
+ CommentToken
+ DoctypeToken
+ StartTagToken
+ StartTagCloseToken
+ StartTagVoidToken
+ EndTagToken
+ AttributeToken
+ TextToken
+ SvgToken
+ MathToken
+)
+
+// String returns the string representation of a TokenType.
+func (tt TokenType) String() string {
+ switch tt {
+ case ErrorToken:
+ return "Error"
+ case CommentToken:
+ return "Comment"
+ case DoctypeToken:
+ return "Doctype"
+ case StartTagToken:
+ return "StartTag"
+ case StartTagCloseToken:
+ return "StartTagClose"
+ case StartTagVoidToken:
+ return "StartTagVoid"
+ case EndTagToken:
+ return "EndTag"
+ case AttributeToken:
+ return "Attribute"
+ case TextToken:
+ return "Text"
+ case SvgToken:
+ return "Svg"
+ case MathToken:
+ return "Math"
+ }
+ return "Invalid(" + strconv.Itoa(int(tt)) + ")"
+}
+
+////////////////////////////////////////////////////////////////
+
+// Lexer is the state for the lexer.
+type Lexer struct {
+ r *parse.Input
+ err error
+
+ rawTag Hash
+ inTag bool
+
+ text []byte
+ attrVal []byte
+}
+
+// NewLexer returns a new Lexer for a given parse.Input.
+func NewLexer(r *parse.Input) *Lexer {
+ return &Lexer{
+ r: r,
+ }
+}
+
+// Err returns the error encountered during lexing; this is often io.EOF but also other errors can be returned.
+func (l *Lexer) Err() error {
+ if l.err != nil {
+ return l.err
+ }
+ return l.r.Err()
+}
+
+// Text returns the textual representation of a token. This excludes delimiters and additional leading/trailing characters.
+func (l *Lexer) Text() []byte {
+ return l.text
+}
+
+// AttrVal returns the attribute value when an AttributeToken was returned from Next.
+func (l *Lexer) AttrVal() []byte {
+ return l.attrVal
+}
+
+// Next returns the next Token. It returns ErrorToken when an error was encountered. Using Err() one can retrieve the error message.
+func (l *Lexer) Next() (TokenType, []byte) {
+ l.text = nil
+ var c byte
+ if l.inTag {
+ l.attrVal = nil
+ for { // before attribute name state
+ if c = l.r.Peek(0); c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' {
+ l.r.Move(1)
+ continue
+ }
+ break
+ }
+ if c == 0 && l.r.Err() != nil {
+ return ErrorToken, nil
+ } else if c != '>' && (c != '/' || l.r.Peek(1) != '>') {
+ return AttributeToken, l.shiftAttribute()
+ }
+ l.r.Skip()
+ l.inTag = false
+ if c == '/' {
+ l.r.Move(2)
+ return StartTagVoidToken, l.r.Shift()
+ }
+ l.r.Move(1)
+ return StartTagCloseToken, l.r.Shift()
+ }
+
+ if l.rawTag != 0 {
+ if rawText := l.shiftRawText(); len(rawText) > 0 {
+ l.text = rawText
+ l.rawTag = 0
+ return TextToken, rawText
+ }
+ l.rawTag = 0
+ }
+
+ for {
+ c = l.r.Peek(0)
+ if c == '<' {
+ c = l.r.Peek(1)
+ isEndTag := c == '/' && l.r.Peek(2) != '>' && (l.r.Peek(2) != 0 || l.r.PeekErr(2) == nil)
+ if l.r.Pos() > 0 {
+ if isEndTag || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '!' || c == '?' {
+ // return currently buffered texttoken so that we can return tag next iteration
+ l.text = l.r.Shift()
+ return TextToken, l.text
+ }
+ } else if isEndTag {
+ l.r.Move(2)
+ // only endtags that are not followed by > or EOF arrive here
+ if c = l.r.Peek(0); !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ return CommentToken, l.shiftBogusComment()
+ }
+ return EndTagToken, l.shiftEndTag()
+ } else if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ l.r.Move(1)
+ l.inTag = true
+ return l.shiftStartTag()
+ } else if c == '!' {
+ l.r.Move(2)
+ return l.readMarkup()
+ } else if c == '?' {
+ l.r.Move(1)
+ return CommentToken, l.shiftBogusComment()
+ }
+ } else if c == 0 && l.r.Err() != nil {
+ if l.r.Pos() > 0 {
+ l.text = l.r.Shift()
+ return TextToken, l.text
+ }
+ return ErrorToken, nil
+ }
+ l.r.Move(1)
+ }
+}
+
+////////////////////////////////////////////////////////////////
+
+// The following functions follow the specifications at https://html.spec.whatwg.org/multipage/parsing.html
+
+func (l *Lexer) shiftRawText() []byte {
+ if l.rawTag == Plaintext {
+ for {
+ if l.r.Peek(0) == 0 && l.r.Err() != nil {
+ return l.r.Shift()
+ }
+ l.r.Move(1)
+ }
+ } else { // RCDATA, RAWTEXT and SCRIPT
+ for {
+ c := l.r.Peek(0)
+ if c == '<' {
+ if l.r.Peek(1) == '/' {
+ mark := l.r.Pos()
+ l.r.Move(2)
+ for {
+ if c = l.r.Peek(0); !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ break
+ }
+ l.r.Move(1)
+ }
+ if h := ToHash(parse.ToLower(parse.Copy(l.r.Lexeme()[mark+2:]))); h == l.rawTag { // copy so that ToLower doesn't change the case of the underlying slice
+ l.r.Rewind(mark)
+ return l.r.Shift()
+ }
+ } else if l.rawTag == Script && l.r.Peek(1) == '!' && l.r.Peek(2) == '-' && l.r.Peek(3) == '-' {
+ l.r.Move(4)
+ inScript := false
+ for {
+ c := l.r.Peek(0)
+ if c == '-' && l.r.Peek(1) == '-' && l.r.Peek(2) == '>' {
+ l.r.Move(3)
+ break
+ } else if c == '<' {
+ isEnd := l.r.Peek(1) == '/'
+ if isEnd {
+ l.r.Move(2)
+ } else {
+ l.r.Move(1)
+ }
+ mark := l.r.Pos()
+ for {
+ if c = l.r.Peek(0); !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ break
+ }
+ l.r.Move(1)
+ }
+ if h := ToHash(parse.ToLower(parse.Copy(l.r.Lexeme()[mark:]))); h == Script { // copy so that ToLower doesn't change the case of the underlying slice
+ if !isEnd {
+ inScript = true
+ } else {
+ if !inScript {
+ l.r.Rewind(mark - 2)
+ return l.r.Shift()
+ }
+ inScript = false
+ }
+ }
+ } else if c == 0 && l.r.Err() != nil {
+ return l.r.Shift()
+ } else {
+ l.r.Move(1)
+ }
+ }
+ } else {
+ l.r.Move(1)
+ }
+ } else if c == 0 && l.r.Err() != nil {
+ return l.r.Shift()
+ } else {
+ l.r.Move(1)
+ }
+ }
+ }
+}
+
+func (l *Lexer) readMarkup() (TokenType, []byte) {
+ if l.at('-', '-') {
+ l.r.Move(2)
+ for {
+ if l.r.Peek(0) == 0 && l.r.Err() != nil {
+ l.text = l.r.Lexeme()[4:]
+ return CommentToken, l.r.Shift()
+ } else if l.at('-', '-', '>') {
+ l.text = l.r.Lexeme()[4:]
+ l.r.Move(3)
+ return CommentToken, l.r.Shift()
+ } else if l.at('-', '-', '!', '>') {
+ l.text = l.r.Lexeme()[4:]
+ l.r.Move(4)
+ return CommentToken, l.r.Shift()
+ }
+ l.r.Move(1)
+ }
+ } else if l.at('[', 'C', 'D', 'A', 'T', 'A', '[') {
+ l.r.Move(7)
+ for {
+ if l.r.Peek(0) == 0 && l.r.Err() != nil {
+ l.text = l.r.Lexeme()[9:]
+ return TextToken, l.r.Shift()
+ } else if l.at(']', ']', '>') {
+ l.text = l.r.Lexeme()[9:]
+ l.r.Move(3)
+ return TextToken, l.r.Shift()
+ }
+ l.r.Move(1)
+ }
+ } else {
+ if l.atCaseInsensitive('d', 'o', 'c', 't', 'y', 'p', 'e') {
+ l.r.Move(7)
+ if l.r.Peek(0) == ' ' {
+ l.r.Move(1)
+ }
+ for {
+ if c := l.r.Peek(0); c == '>' || c == 0 && l.r.Err() != nil {
+ l.text = l.r.Lexeme()[9:]
+ if c == '>' {
+ l.r.Move(1)
+ }
+ return DoctypeToken, l.r.Shift()
+ }
+ l.r.Move(1)
+ }
+ }
+ }
+ return CommentToken, l.shiftBogusComment()
+}
+
+func (l *Lexer) shiftBogusComment() []byte {
+ for {
+ c := l.r.Peek(0)
+ if c == '>' {
+ l.text = l.r.Lexeme()[2:]
+ l.r.Move(1)
+ return l.r.Shift()
+ } else if c == 0 && l.r.Err() != nil {
+ l.text = l.r.Lexeme()[2:]
+ return l.r.Shift()
+ }
+ l.r.Move(1)
+ }
+}
+
+func (l *Lexer) shiftStartTag() (TokenType, []byte) {
+ for {
+ if c := l.r.Peek(0); c == ' ' || c == '>' || c == '/' && l.r.Peek(1) == '>' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == 0 && l.r.Err() != nil {
+ break
+ }
+ l.r.Move(1)
+ }
+ l.text = parse.ToLower(l.r.Lexeme()[1:])
+ if h := ToHash(l.text); h == Textarea || h == Title || h == Style || h == Xmp || h == Iframe || h == Script || h == Plaintext || h == Svg || h == Math {
+ if h == Svg || h == Math {
+ data := l.shiftXML(h)
+ if l.err != nil {
+ return ErrorToken, nil
+ }
+
+ l.inTag = false
+ if h == Svg {
+ return SvgToken, data
+ }
+ return MathToken, data
+ }
+ l.rawTag = h
+ }
+ return StartTagToken, l.r.Shift()
+}
+
+func (l *Lexer) shiftAttribute() []byte {
+ nameStart := l.r.Pos()
+ var c byte
+ for { // attribute name state
+ if c = l.r.Peek(0); c == ' ' || c == '=' || c == '>' || c == '/' && l.r.Peek(1) == '>' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == 0 && l.r.Err() != nil {
+ break
+ }
+ l.r.Move(1)
+ }
+ nameEnd := l.r.Pos()
+ for { // after attribute name state
+ if c = l.r.Peek(0); c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' {
+ l.r.Move(1)
+ continue
+ }
+ break
+ }
+ if c == '=' {
+ l.r.Move(1)
+ for { // before attribute value state
+ if c = l.r.Peek(0); c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' {
+ l.r.Move(1)
+ continue
+ }
+ break
+ }
+ attrPos := l.r.Pos()
+ delim := c
+ if delim == '"' || delim == '\'' { // attribute value single- and double-quoted state
+ l.r.Move(1)
+ for {
+ c := l.r.Peek(0)
+ if c == delim {
+ l.r.Move(1)
+ break
+ } else if c == 0 && l.r.Err() != nil {
+ break
+ }
+ l.r.Move(1)
+ }
+ } else { // attribute value unquoted state
+ for {
+ if c := l.r.Peek(0); c == ' ' || c == '>' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == 0 && l.r.Err() != nil {
+ break
+ }
+ l.r.Move(1)
+ }
+ }
+ l.attrVal = l.r.Lexeme()[attrPos:]
+ } else {
+ l.r.Rewind(nameEnd)
+ l.attrVal = nil
+ }
+ l.text = parse.ToLower(l.r.Lexeme()[nameStart:nameEnd])
+ return l.r.Shift()
+}
+
+func (l *Lexer) shiftEndTag() []byte {
+ for {
+ c := l.r.Peek(0)
+ if c == '>' {
+ l.text = l.r.Lexeme()[2:]
+ l.r.Move(1)
+ break
+ } else if c == 0 && l.r.Err() != nil {
+ l.text = l.r.Lexeme()[2:]
+ break
+ }
+ l.r.Move(1)
+ }
+
+ end := len(l.text)
+ for end > 0 {
+ if c := l.text[end-1]; c == ' ' || c == '\t' || c == '\n' || c == '\r' {
+ end--
+ continue
+ }
+ break
+ }
+ l.text = l.text[:end]
+ return parse.ToLower(l.r.Shift())
+}
+
+// shiftXML parses the content of a svg or math tag according to the XML 1.1 specifications, including the tag itself.
+// So far we have already parsed `<svg` or `<math`.
+func (l *Lexer) shiftXML(rawTag Hash) []byte {
+ inQuote := false
+ for {
+ c := l.r.Peek(0)
+ if c == '"' {
+ inQuote = !inQuote
+ l.r.Move(1)
+ } else if c == '<' && !inQuote && l.r.Peek(1) == '/' {
+ mark := l.r.Pos()
+ l.r.Move(2)
+ for {
+ if c = l.r.Peek(0); !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ break
+ }
+ l.r.Move(1)
+ }
+ if h := ToHash(parse.ToLower(parse.Copy(l.r.Lexeme()[mark+2:]))); h == rawTag { // copy so that ToLower doesn't change the case of the underlying slice
+ break
+ }
+ } else if c == 0 {
+ if l.r.Err() == nil {
+ l.err = parse.NewErrorLexer(l.r, "HTML parse error: unexpected NULL character")
+ }
+ return l.r.Shift()
+ } else {
+ l.r.Move(1)
+ }
+ }
+
+ for {
+ c := l.r.Peek(0)
+ if c == '>' {
+ l.r.Move(1)
+ break
+ } else if c == 0 {
+ if l.r.Err() == nil {
+ l.err = parse.NewErrorLexer(l.r, "HTML parse error: unexpected NULL character")
+ }
+ return l.r.Shift()
+ }
+ l.r.Move(1)
+ }
+ return l.r.Shift()
+}
+
+////////////////////////////////////////////////////////////////
+
+func (l *Lexer) at(b ...byte) bool {
+ for i, c := range b {
+ if l.r.Peek(i) != c {
+ return false
+ }
+ }
+ return true
+}
+
+func (l *Lexer) atCaseInsensitive(b ...byte) bool {
+ for i, c := range b {
+ if l.r.Peek(i) != c && (l.r.Peek(i)+('a'-'A')) != c {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/html/util.go b/vendor/github.com/tdewolff/parse/v2/html/util.go
new file mode 100644
index 0000000..0dbd52c
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/html/util.go
@@ -0,0 +1,113 @@
+package html
+
+var (
+ singleQuoteEntityBytes = []byte("&#39;")
+ doubleQuoteEntityBytes = []byte("&#34;")
+)
+
+// EscapeAttrVal returns the escaped attribute value bytes with quotes. Either single or double quotes are used, whichever is shorter. If there are no quotes present in the value and the value is in HTML (not XML), it will return the value without quotes.
+func EscapeAttrVal(buf *[]byte, b []byte, origQuote byte, mustQuote, isXML bool) []byte {
+ singles := 0
+ doubles := 0
+ unquoted := true
+ for _, c := range b {
+ if charTable[c] {
+ unquoted = false
+ if c == '"' {
+ doubles++
+ } else if c == '\'' {
+ singles++
+ }
+ }
+ }
+ if unquoted && (!mustQuote || origQuote == 0) && !isXML {
+ return b
+ } else if singles == 0 && origQuote == '\'' && !isXML || doubles == 0 && origQuote == '"' {
+ if len(b)+2 > cap(*buf) {
+ *buf = make([]byte, 0, len(b)+2)
+ }
+ t := (*buf)[:len(b)+2]
+ t[0] = origQuote
+ copy(t[1:], b)
+ t[1+len(b)] = origQuote
+ return t
+ }
+
+ n := len(b) + 2
+ var quote byte
+ var escapedQuote []byte
+ if singles >= doubles || isXML {
+ n += doubles * 4
+ quote = '"'
+ escapedQuote = doubleQuoteEntityBytes
+ if singles == doubles && origQuote == '\'' && !isXML {
+ quote = '\''
+ escapedQuote = singleQuoteEntityBytes
+ }
+ } else {
+ n += singles * 4
+ quote = '\''
+ escapedQuote = singleQuoteEntityBytes
+ }
+ if n > cap(*buf) {
+ *buf = make([]byte, 0, n) // maximum size, not actual size
+ }
+ t := (*buf)[:n] // maximum size, not actual size
+ t[0] = quote
+ j := 1
+ start := 0
+ for i, c := range b {
+ if c == quote {
+ j += copy(t[j:], b[start:i])
+ j += copy(t[j:], escapedQuote)
+ start = i + 1
+ }
+ }
+ j += copy(t[j:], b[start:])
+ t[j] = quote
+ return t[:j+1]
+}
+
+var charTable = [256]bool{
+ // ASCII
+ false, false, false, false, false, false, false, false,
+ false, true, true, false, true, true, false, false, // tab, line feed, form feed, carriage return
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+	true, false, true, false, false, false, false, true, // space, ", '
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, true, true, true, false, // <, =, >
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ true, false, false, false, false, false, false, false, // `
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ // non-ASCII
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/input.go b/vendor/github.com/tdewolff/parse/v2/input.go
new file mode 100644
index 0000000..5b6d8f5
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/input.go
@@ -0,0 +1,173 @@
+package parse
+
+import (
+ "io"
+ "io/ioutil"
+)
+
+var nullBuffer = []byte{0}
+
+// Input is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
+// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
+type Input struct {
+ buf []byte
+ pos int // index in buf
+ start int // index in buf
+ err error
+
+ restore func()
+}
+
+// NewInput returns a new Input for a given io.Reader and uses ioutil.ReadAll to read it into a byte slice.
+// If the io.Reader implements Bytes, that is used instead. It will append a NULL at the end of the buffer.
+func NewInput(r io.Reader) *Input {
+ var b []byte
+ if r != nil {
+ if buffer, ok := r.(interface {
+ Bytes() []byte
+ }); ok {
+ b = buffer.Bytes()
+ } else {
+ var err error
+ b, err = ioutil.ReadAll(r)
+ if err != nil {
+ return &Input{
+ buf: nullBuffer,
+ err: err,
+ }
+ }
+ }
+ }
+ return NewInputBytes(b)
+}
+
+// NewInputString returns a new Input for a given string and appends NULL at the end.
+func NewInputString(s string) *Input {
+ return NewInputBytes([]byte(s))
+}
+
+// NewInputBytes returns a new Input for a given byte slice and appends NULL at the end.
+// To avoid reallocation, make sure the capacity has room for one more byte.
+func NewInputBytes(b []byte) *Input {
+ z := &Input{
+ buf: b,
+ }
+
+ n := len(b)
+ if n == 0 {
+ z.buf = nullBuffer
+ } else {
+ // Append NULL to buffer, but try to avoid reallocation
+ if cap(b) > n {
+ // Overwrite next byte but restore when done
+ b = b[:n+1]
+ c := b[n]
+ b[n] = 0
+
+ z.buf = b
+ z.restore = func() {
+ b[n] = c
+ }
+ } else {
+ z.buf = append(b, 0)
+ }
+ }
+ return z
+}
+
+// Restore restores the replaced byte past the end of the buffer by NULL.
+func (z *Input) Restore() {
+ if z.restore != nil {
+ z.restore()
+ z.restore = nil
+ }
+}
+
+// Err returns the error returned from the io.Reader or io.EOF when the end has been reached.
+func (z *Input) Err() error {
+ return z.PeekErr(0)
+}
+
+// PeekErr returns the error at position pos. When pos is zero, this is the same as calling Err().
+func (z *Input) PeekErr(pos int) error {
+ if z.err != nil {
+ return z.err
+ } else if z.pos+pos >= len(z.buf)-1 {
+ return io.EOF
+ }
+ return nil
+}
+
+// Peek returns the ith byte relative to the current position.
+// Peek returns 0 when an error has occurred; Err returns the error.
+func (z *Input) Peek(pos int) byte {
+ pos += z.pos
+ return z.buf[pos]
+}
+
+// PeekRune returns the rune and rune length of the ith byte relative to the current position.
+func (z *Input) PeekRune(pos int) (rune, int) {
+ // from unicode/utf8
+ c := z.Peek(pos)
+ if c < 0xC0 || z.Peek(pos+1) == 0 {
+ return rune(c), 1
+ } else if c < 0xE0 || z.Peek(pos+2) == 0 {
+ return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
+ } else if c < 0xF0 || z.Peek(pos+3) == 0 {
+ return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
+ }
+ return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
+}
+
+// Move advances the position.
+func (z *Input) Move(n int) {
+ z.pos += n
+}
+
+// Pos returns a mark to which the position can be rewound.
+func (z *Input) Pos() int {
+ return z.pos - z.start
+}
+
+// Rewind rewinds the position to the given position.
+func (z *Input) Rewind(pos int) {
+ z.pos = z.start + pos
+}
+
+// Lexeme returns the bytes of the current selection.
+func (z *Input) Lexeme() []byte {
+ return z.buf[z.start:z.pos:z.pos]
+}
+
+// Skip collapses the position to the end of the selection.
+func (z *Input) Skip() {
+ z.start = z.pos
+}
+
+// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
+func (z *Input) Shift() []byte {
+ b := z.buf[z.start:z.pos:z.pos]
+ z.start = z.pos
+ return b
+}
+
+// Offset returns the character position in the buffer.
+func (z *Input) Offset() int {
+ return z.pos
+}
+
+// Bytes returns the underlying buffer.
+func (z *Input) Bytes() []byte {
+ return z.buf[: len(z.buf)-1 : len(z.buf)-1]
+}
+
+// Len returns the length of the underlying buffer.
+func (z *Input) Len() int {
+ return len(z.buf) - 1
+}
+
+// Reset resets the position to the start of the underlying buffer.
+func (z *Input) Reset() {
+ z.start = 0
+ z.pos = 0
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/README.md b/vendor/github.com/tdewolff/parse/v2/js/README.md
new file mode 100644
index 0000000..e1d4007
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/README.md
@@ -0,0 +1,80 @@
+# JS [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/parse/v2/js?tab=doc)
+
+This package is a JS lexer (ECMAScript 2020) written in [Go][1]. It follows the specification at [ECMAScript 2020 Language Specification](https://tc39.es/ecma262/). The lexer takes an io.Reader and converts it into tokens until the EOF.
+
+## Installation
+Run the following command
+
+ go get -u github.com/tdewolff/parse/v2/js
+
+or add the following import and run project with `go get`
+
+ import "github.com/tdewolff/parse/v2/js"
+
+## Lexer
+### Usage
+The following initializes a new Lexer with io.Reader `r`:
+``` go
+l := js.NewLexer(parse.NewInput(r))
+```
+
+To tokenize until EOF or an error occurs, use:
+``` go
+for {
+ tt, text := l.Next()
+ switch tt {
+ case js.ErrorToken:
+ // error or EOF set in l.Err()
+ return
+ // ...
+ }
+}
+```
+
+### Regular Expressions
+The ECMAScript specification for `PunctuatorToken` (of which the `/` and `/=` symbols) and `RegExpToken` depend on a parser state to differentiate between the two. The lexer will always parse the first token as `/` or `/=` operator, upon which the parser can rescan that token to scan a regular expression using `RegExp()`.
+
+### Examples
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/tdewolff/parse/v2/js"
+)
+
+// Tokenize JS from stdin.
+func main() {
+ l := js.NewLexer(parse.NewInput(os.Stdin))
+ for {
+ tt, text := l.Next()
+ switch tt {
+ case js.ErrorToken:
+ if l.Err() != io.EOF {
+ fmt.Println("Error on line", l.Line(), ":", l.Err())
+ }
+ return
+ case js.IdentifierToken:
+ fmt.Println("Identifier", string(text))
+ case js.NumericToken:
+ fmt.Println("Numeric", string(text))
+ // ...
+ }
+ }
+}
+```
+
+## Parser
+### Usage
+The following parses a file and returns an abstract syntax tree (AST).
+``` go
+ast, err := js.NewParser(parse.NewInputString("if (state == 5) { console.log('In state five'); }"))
+```
+
+See [ast.go](https://github.com/tdewolff/parse/blob/master/js/ast.go) for all available data structures that can represent the abstract syntax tree.
+
+## License
+Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).
+
+[1]: http://golang.org/ "Go Language"
diff --git a/vendor/github.com/tdewolff/parse/v2/js/ast.go b/vendor/github.com/tdewolff/parse/v2/js/ast.go
new file mode 100644
index 0000000..92e80d7
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/ast.go
@@ -0,0 +1,3884 @@
+package js
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/tdewolff/parse/v2"
+)
+
+var ErrInvalidJSON = fmt.Errorf("invalid JSON")
+
+type JSONer interface {
+ JSON(*bytes.Buffer) error
+}
+
+// AST is the full ECMAScript abstract syntax tree.
+type AST struct {
+ Comments [][]byte // first comments in file
+ BlockStmt // module
+}
+
+func (ast *AST) String() string {
+ s := ""
+ for i, item := range ast.BlockStmt.List {
+ if i != 0 {
+ s += " "
+ }
+ s += item.String()
+ }
+ return s
+}
+
+////////////////////////////////////////////////////////////////
+
+// DeclType specifies the kind of declaration.
+type DeclType uint16
+
+// DeclType values.
+const (
+ NoDecl DeclType = iota // undeclared variables
+ VariableDecl // var
+ FunctionDecl // function
+ ArgumentDecl // function and method arguments
+ LexicalDecl // let, const, class
+ CatchDecl // catch statement argument
+ ExprDecl // function expression name or class expression name
+)
+
+func (decl DeclType) String() string {
+ switch decl {
+ case NoDecl:
+ return "NoDecl"
+ case VariableDecl:
+ return "VariableDecl"
+ case FunctionDecl:
+ return "FunctionDecl"
+ case ArgumentDecl:
+ return "ArgumentDecl"
+ case LexicalDecl:
+ return "LexicalDecl"
+ case CatchDecl:
+ return "CatchDecl"
+ case ExprDecl:
+ return "ExprDecl"
+ }
+ return "Invalid(" + strconv.Itoa(int(decl)) + ")"
+}
+
+// Var is a variable, where Decl is the type of declaration and can be var|function for function scoped variables, let|const|class for block scoped variables.
+type Var struct {
+ Data []byte
+ Link *Var // is set when merging variable uses, as in: {a} {var a} where the first links to the second, only used for undeclared variables
+ Uses uint16
+ Decl DeclType
+}
+
+// Name returns the variable name.
+func (v *Var) Name() []byte {
+ for v.Link != nil {
+ v = v.Link
+ }
+ return v.Data
+}
+
+func (v Var) String() string {
+ return string(v.Name())
+}
+
+// JS converts the node back to valid JavaScript
+func (v Var) JS() string {
+ return v.String()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (v Var) JSWriteTo(w io.Writer) (i int, err error) {
+ return w.Write(v.Name())
+}
+
+// VarsByUses is sortable by uses in descending order.
+// TODO: write custom sorter for varsbyuses
+type VarsByUses VarArray
+
+func (vs VarsByUses) Len() int {
+ return len(vs)
+}
+
+func (vs VarsByUses) Swap(i, j int) {
+ vs[i], vs[j] = vs[j], vs[i]
+}
+
+func (vs VarsByUses) Less(i, j int) bool {
+ return vs[i].Uses > vs[j].Uses
+}
+
+////////////////////////////////////////////////////////////////
+
+// VarArray is a set of variables in scopes.
+type VarArray []*Var
+
+func (vs VarArray) String() string {
+ s := "["
+ for i, v := range vs {
+ if i != 0 {
+ s += ", "
+ }
+ links := 0
+ for v.Link != nil {
+ v = v.Link
+ links++
+ }
+ s += fmt.Sprintf("Var{%v %s %v %v}", v.Decl, string(v.Data), links, v.Uses)
+ }
+ return s + "]"
+}
+
+// Scope is a function or block scope with a list of variables declared and used.
+type Scope struct {
+ Parent, Func *Scope // Parent is nil for global scope
+ Declared VarArray // Link in Var are always nil
+ Undeclared VarArray
+ VarDecls []*VarDecl
+ NumForDecls uint16 // offset into Declared to mark variables used in for statements
+ NumFuncArgs uint16 // offset into Declared to mark variables used in function arguments
+ NumArgUses uint16 // offset into Undeclared to mark variables used in arguments
+ IsGlobalOrFunc bool
+ HasWith bool
+}
+
+func (s Scope) String() string {
+ return "Scope{Declared: " + s.Declared.String() + ", Undeclared: " + s.Undeclared.String() + "}"
+}
+
+// Declare declares a new variable.
+func (s *Scope) Declare(decl DeclType, name []byte) (*Var, bool) {
+ // refer to new variable for previously undeclared symbols in the current and lower scopes
+ // this happens in `{ a = 5; } var a` where both a's refer to the same variable
+ curScope := s
+ if decl == VariableDecl || decl == FunctionDecl {
+ // find function scope for var and function declarations
+ for s != s.Func {
+ // make sure that `{let i;{var i}}` is an error
+ if v := s.findDeclared(name, false); v != nil && v.Decl != decl && v.Decl != CatchDecl {
+ return nil, false
+ }
+ s = s.Parent
+ }
+ }
+
+ if v := s.findDeclared(name, true); v != nil {
+ // variable already declared, might be an error or a duplicate declaration
+ if (ArgumentDecl < v.Decl || FunctionDecl < decl) && v.Decl != ExprDecl {
+ // only allow (v.Decl,decl) of: (var|function|argument,var|function), (expr,*), any other combination is a syntax error
+ return nil, false
+ }
+ if v.Decl == ExprDecl {
+ v.Decl = decl
+ }
+ v.Uses++
+ for s != curScope {
+ curScope.AddUndeclared(v) // add variable declaration as used variable to the current scope
+ curScope = curScope.Parent
+ }
+ return v, true
+ }
+
+ var v *Var
+ // reuse variable if previously used, as in: a;var a
+ if decl != ArgumentDecl { // in case of function f(a=b,b), where the first b is different from the second
+ for i, uv := range s.Undeclared[s.NumArgUses:] {
+ // no need to evaluate v.Link as v.Data stays the same and Link is nil in the active scope
+ if 0 < uv.Uses && uv.Decl == NoDecl && bytes.Equal(name, uv.Data) {
+ // must be NoDecl so that it can't be a var declaration that has been added
+ v = uv
+ s.Undeclared = append(s.Undeclared[:int(s.NumArgUses)+i], s.Undeclared[int(s.NumArgUses)+i+1:]...)
+ break
+ }
+ }
+ }
+ if v == nil {
+ // add variable to the context list and to the scope
+ v = &Var{name, nil, 0, decl}
+ } else {
+ v.Decl = decl
+ }
+ v.Uses++
+ s.Declared = append(s.Declared, v)
+ for s != curScope {
+ curScope.AddUndeclared(v) // add variable declaration as used variable to the current scope
+ curScope = curScope.Parent
+ }
+ return v, true
+}
+
+// Use increments the usage of a variable.
+func (s *Scope) Use(name []byte) *Var {
+ // check if variable is declared in the current scope
+ v := s.findDeclared(name, false)
+ if v == nil {
+ // check if variable is already used before in the current or lower scopes
+ v = s.findUndeclared(name)
+ if v == nil {
+ // add variable to the context list and to the scope's undeclared
+ v = &Var{name, nil, 0, NoDecl}
+ s.Undeclared = append(s.Undeclared, v)
+ }
+ }
+ v.Uses++
+ return v
+}
+
+// findDeclared finds a declared variable in the current scope.
+func (s *Scope) findDeclared(name []byte, skipForDeclared bool) *Var {
+ start := 0
+ if skipForDeclared {
+ // we skip the for initializer for declarations (only has effect for let/const)
+ start = int(s.NumForDecls)
+ }
+ // reverse order to find the inner let first in `for(let a in []){let a; {a}}`
+ for i := len(s.Declared) - 1; start <= i; i-- {
+ v := s.Declared[i]
+ // no need to evaluate v.Link as v.Data stays the same, and Link is always nil in Declared
+ if bytes.Equal(name, v.Data) {
+ return v
+ }
+ }
+ return nil
+}
+
+// findUndeclared finds an undeclared variable in the current and contained scopes.
+func (s *Scope) findUndeclared(name []byte) *Var {
+ for _, v := range s.Undeclared {
+ // no need to evaluate v.Link as v.Data stays the same and Link is nil in the active scope
+ if 0 < v.Uses && bytes.Equal(name, v.Data) {
+ return v
+ }
+ }
+ return nil
+}
+
+// add undeclared variable to scope, this is called for the block scope when declaring a var in it
+func (s *Scope) AddUndeclared(v *Var) {
+ // don't add undeclared symbol if it's already there
+ for _, vorig := range s.Undeclared {
+ if v == vorig {
+ return
+ }
+ }
+ s.Undeclared = append(s.Undeclared, v) // add variable declaration as used variable to the current scope
+}
+
+// MarkForStmt marks the declared variables in current scope as for statement initializer to distinguish from declarations in body.
+func (s *Scope) MarkForStmt() {
+ s.NumForDecls = uint16(len(s.Declared))
+ s.NumArgUses = uint16(len(s.Undeclared)) // ensures for different b's in for(var a in b){let b}
+}
+
+// MarkFuncArgs marks the declared/undeclared variables in the current scope as function arguments.
+func (s *Scope) MarkFuncArgs() {
+ s.NumFuncArgs = uint16(len(s.Declared))
+ s.NumArgUses = uint16(len(s.Undeclared)) // ensures different b's in `function f(a=b){var b}`.
+}
+
+// HoistUndeclared copies all undeclared variables of the current scope to the parent scope.
+func (s *Scope) HoistUndeclared() {
+ for i, vorig := range s.Undeclared {
+ // no need to evaluate vorig.Link as vorig.Data stays the same
+ if 0 < vorig.Uses && vorig.Decl == NoDecl {
+ if v := s.Parent.findDeclared(vorig.Data, false); v != nil {
+ // check if variable is declared in parent scope
+ v.Uses += vorig.Uses
+ vorig.Link = v
+ s.Undeclared[i] = v // point reference to existing var (to avoid many Link chains)
+ } else if v := s.Parent.findUndeclared(vorig.Data); v != nil {
+ // check if variable is already used before in parent scope
+ v.Uses += vorig.Uses
+ vorig.Link = v
+ s.Undeclared[i] = v // point reference to existing var (to avoid many Link chains)
+ } else {
+ // add variable to the context list and to the scope's undeclared
+ s.Parent.Undeclared = append(s.Parent.Undeclared, vorig)
+ }
+ }
+ }
+}
+
+// UndeclareScope undeclares all declared variables in the current scope and adds them to the parent scope.
+// Called when possible arrow func ends up being a parenthesized expression, scope is not further used.
+func (s *Scope) UndeclareScope() {
+ // look if the variable already exists in the parent scope, if so replace the Var pointer in original use
+ for _, vorig := range s.Declared {
+ // no need to evaluate vorig.Link as vorig.Data stays the same, and Link is always nil in Declared
+	// vorig.Uses will be at least 1
+ if v := s.Parent.findDeclared(vorig.Data, false); v != nil {
+ // check if variable has been declared in this scope
+ v.Uses += vorig.Uses
+ vorig.Link = v
+ } else if v := s.Parent.findUndeclared(vorig.Data); v != nil {
+ // check if variable is already used before in the current or lower scopes
+ v.Uses += vorig.Uses
+ vorig.Link = v
+ } else {
+ // add variable to the context list and to the scope's undeclared
+ vorig.Decl = NoDecl
+ s.Parent.Undeclared = append(s.Parent.Undeclared, vorig)
+ }
+ }
+ s.Declared = s.Declared[:0]
+ s.Undeclared = s.Undeclared[:0]
+}
+
+// Unscope moves all declared variables of the current scope to the parent scope. Undeclared variables are already in the parent scope.
+func (s *Scope) Unscope() {
+ for _, vorig := range s.Declared {
+ // no need to evaluate vorig.Link as vorig.Data stays the same, and Link is always nil in Declared
+	// vorig.Uses will be at least 1
+ s.Parent.Declared = append(s.Parent.Declared, vorig)
+ }
+ s.Declared = s.Declared[:0]
+ s.Undeclared = s.Undeclared[:0]
+}
+
+////////////////////////////////////////////////////////////////
+
+// INode is an interface for AST nodes
+type INode interface {
+ String() string
+ JS() string
+ JSWriteTo(io.Writer) (int, error)
+}
+
+// IStmt is a dummy interface for statements.
+type IStmt interface {
+ INode
+ stmtNode()
+}
+
+// IBinding is a dummy interface for bindings.
+type IBinding interface {
+ INode
+ bindingNode()
+}
+
+// IExpr is a dummy interface for expressions.
+type IExpr interface {
+ INode
+ exprNode()
+}
+
+////////////////////////////////////////////////////////////////
+
+// BlockStmt is a block statement.
+type BlockStmt struct {
+ List []IStmt
+ Scope
+}
+
+func (n BlockStmt) String() string {
+ s := "Stmt({"
+ for _, item := range n.List {
+ s += " " + item.String()
+ }
+ return s + " })"
+}
+
+// JS converts the node back to valid JavaScript
+func (n BlockStmt) JS() string {
+ s := ""
+ if n.Scope.Parent != nil {
+ s += "{ "
+ }
+ for _, item := range n.List {
+ if _, isEmpty := item.(*EmptyStmt); !isEmpty {
+ s += item.JS() + "; "
+ }
+ }
+ if n.Scope.Parent != nil {
+ s += "}"
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BlockStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Scope.Parent != nil {
+ wn, err = w.Write([]byte("{ "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ for _, item := range n.List {
+ if _, isEmpty := item.(*EmptyStmt); !isEmpty {
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte("; "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ }
+ if n.Scope.Parent != nil {
+ wn, err = w.Write([]byte{'}'})
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// EmptyStmt is an empty statement.
+type EmptyStmt struct {
+}
+
+func (n EmptyStmt) String() string {
+ return "Stmt(;)"
+}
+
+// JS converts the node back to valid JavaScript
+func (n EmptyStmt) JS() string {
+ return ";"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n EmptyStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ wn, err := w.Write([]byte{';'})
+ i = wn
+ return
+}
+
+// ExprStmt is an expression statement.
+type ExprStmt struct {
+ Value IExpr
+}
+
+func (n ExprStmt) String() string {
+ val := n.Value.String()
+ if val[0] == '(' && val[len(val)-1] == ')' {
+ return "Stmt" + n.Value.String()
+ }
+ return "Stmt(" + n.Value.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ExprStmt) JS() string {
+ return n.Value.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ExprStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ return n.Value.JSWriteTo(w)
+}
+
+// IfStmt is an if statement.
+type IfStmt struct {
+ Cond IExpr
+ Body IStmt
+ Else IStmt // can be nil
+}
+
+func (n IfStmt) String() string {
+ s := "Stmt(if " + n.Cond.String() + " " + n.Body.String()
+ if n.Else != nil {
+ s += " else " + n.Else.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n IfStmt) JS() string {
+ s := "if (" + n.Cond.JS() + ") "
+ switch n.Body.(type) {
+ case *BlockStmt:
+ s += n.Body.JS()
+ default:
+ s += "{ " + n.Body.JS() + " }"
+ }
+ if n.Else != nil {
+ switch n.Else.(type) {
+ case *BlockStmt:
+ s += " else " + n.Else.JS()
+ default:
+ s += " else { " + n.Else.JS() + " }"
+ }
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n IfStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("if ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(") "))
+ i += wn
+ if err != nil {
+ return
+ }
+ switch n.Body.(type) {
+ case *BlockStmt:
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ default:
+ wn, err = w.Write([]byte("{ "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" }"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Else != nil {
+ switch n.Else.(type) {
+ case *BlockStmt:
+ wn, err = w.Write([]byte(" else "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Else.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ default:
+ wn, err = w.Write([]byte(" else { "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Else.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" }"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// DoWhileStmt is a do-while iteration statement.
+type DoWhileStmt struct {
+ Cond IExpr
+ Body IStmt
+}
+
+func (n DoWhileStmt) String() string {
+ return "Stmt(do " + n.Body.String() + " while " + n.Cond.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n DoWhileStmt) JS() string {
+ s := "do "
+ switch n.Body.(type) {
+ case *BlockStmt:
+ s += n.Body.JS()
+ default:
+ s += "{ " + n.Body.JS() + " }"
+ }
+ return s + " while (" + n.Cond.JS() + ")"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n DoWhileStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("do "))
+ i += wn
+ if err != nil {
+ return
+ }
+ switch n.Body.(type) {
+ case *BlockStmt:
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ default:
+ wn, err = w.Write([]byte("{ "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" }"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" while ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(")"))
+ i += wn
+ return
+}
+
+// WhileStmt is a while iteration statement.
+type WhileStmt struct {
+ Cond IExpr
+ Body IStmt
+}
+
+func (n WhileStmt) String() string {
+ return "Stmt(while " + n.Cond.String() + " " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n WhileStmt) JS() string {
+ s := "while (" + n.Cond.JS() + ") "
+ if n.Body != nil {
+ s += n.Body.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n WhileStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("while ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(") "))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Body != nil {
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ForStmt is a regular for iteration statement.
+type ForStmt struct {
+ Init IExpr // can be nil
+ Cond IExpr // can be nil
+ Post IExpr // can be nil
+ Body *BlockStmt
+}
+
+func (n ForStmt) String() string {
+ s := "Stmt(for"
+ if v, ok := n.Init.(*VarDecl); !ok && n.Init != nil || ok && len(v.List) != 0 {
+ s += " " + n.Init.String()
+ }
+ s += " ;"
+ if n.Cond != nil {
+ s += " " + n.Cond.String()
+ }
+ s += " ;"
+ if n.Post != nil {
+ s += " " + n.Post.String()
+ }
+ return s + " " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ForStmt) JS() string {
+ s := "for ("
+ if v, ok := n.Init.(*VarDecl); !ok && n.Init != nil || ok && len(v.List) != 0 {
+ s += n.Init.JS()
+ } else {
+ s += " "
+ }
+ s += "; "
+ if n.Cond != nil {
+ s += n.Cond.JS()
+ }
+ s += "; "
+ if n.Post != nil {
+ s += n.Post.JS()
+ }
+ return s + ") " + n.Body.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ForStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("for ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ if v, ok := n.Init.(*VarDecl); !ok && n.Init != nil || ok && len(v.List) != 0 {
+ wn, err = n.Init.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ } else {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("; "))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Cond != nil {
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("; "))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Post != nil {
+ wn, err = n.Post.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(") "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// ForInStmt is a for-in iteration statement.
+type ForInStmt struct {
+ Init IExpr
+ Value IExpr
+ Body *BlockStmt
+}
+
+func (n ForInStmt) String() string {
+ return "Stmt(for " + n.Init.String() + " in " + n.Value.String() + " " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ForInStmt) JS() string {
+ return "for (" + n.Init.JS() + " in " + n.Value.JS() + ") " + n.Body.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ForInStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("for ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Init.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" in "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(") "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// ForOfStmt is a for-of iteration statement.
+type ForOfStmt struct {
+ Await bool
+ Init IExpr
+ Value IExpr
+ Body *BlockStmt
+}
+
+func (n ForOfStmt) String() string {
+ s := "Stmt(for"
+ if n.Await {
+ s += " await"
+ }
+ return s + " " + n.Init.String() + " of " + n.Value.String() + " " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ForOfStmt) JS() string {
+ s := "for"
+ if n.Await {
+ s += " await"
+ }
+ return s + " (" + n.Init.JS() + " of " + n.Value.JS() + ") " + n.Body.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ForOfStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("for"))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Await {
+ wn, err = w.Write([]byte(" await"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Init.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" of "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(") "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// CaseClause is a case clause or default clause for a switch statement.
+type CaseClause struct {
+ TokenType
+ Cond IExpr // can be nil
+ List []IStmt
+}
+
+func (n CaseClause) String() string {
+ s := " Clause(" + n.TokenType.String()
+ if n.Cond != nil {
+ s += " " + n.Cond.String()
+ }
+ for _, item := range n.List {
+ s += " " + item.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n CaseClause) JS() string {
+ s := " "
+ if n.Cond != nil {
+ s += "case " + n.Cond.JS()
+ } else {
+ s += "default"
+ }
+ s += ":"
+ for _, item := range n.List {
+ s += " " + item.JS() + ";"
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n CaseClause) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Cond != nil {
+ wn, err = w.Write([]byte("case "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ } else {
+ wn, err = w.Write([]byte("default"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(":"))
+ i += wn
+ if err != nil {
+ return
+ }
+ for _, item := range n.List {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(";"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SwitchStmt is a switch statement.
+type SwitchStmt struct {
+ Init IExpr
+ List []CaseClause
+ Scope
+}
+
+func (n SwitchStmt) String() string {
+ s := "Stmt(switch " + n.Init.String()
+ for _, clause := range n.List {
+ s += clause.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n SwitchStmt) JS() string {
+ s := "switch (" + n.Init.JS() + ") {"
+ for _, clause := range n.List {
+ s += clause.JS()
+ }
+ return s + " }"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n SwitchStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("switch ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Init.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(") {"))
+ i += wn
+ if err != nil {
+ return
+ }
+ for _, clause := range n.List {
+ wn, err = clause.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" }"))
+ i += wn
+ return
+}
+
+// BranchStmt is a continue or break statement.
+type BranchStmt struct {
+ Type TokenType
+ Label []byte // can be nil
+}
+
+func (n BranchStmt) String() string {
+ s := "Stmt(" + n.Type.String()
+ if n.Label != nil {
+ s += " " + string(n.Label)
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n BranchStmt) JS() string {
+ s := n.Type.String()
+ if n.Label != nil {
+ s += " " + string(n.Label)
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BranchStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write(n.Type.Bytes())
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Label != nil {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Label)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ReturnStmt is a return statement.
+type ReturnStmt struct {
+ Value IExpr // can be nil
+}
+
+func (n ReturnStmt) String() string {
+ s := "Stmt(return"
+ if n.Value != nil {
+ s += " " + n.Value.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ReturnStmt) JS() string {
+ s := "return"
+ if n.Value != nil {
+ s += " " + n.Value.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ReturnStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("return"))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Value != nil {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// WithStmt is a with statement.
+type WithStmt struct {
+ Cond IExpr
+ Body IStmt
+}
+
+func (n WithStmt) String() string {
+ return "Stmt(with " + n.Cond.String() + " " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n WithStmt) JS() string {
+ return "with (" + n.Cond.JS() + ") " + n.Body.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n WithStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("with ("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(") "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// LabelledStmt is a labelled statement.
+type LabelledStmt struct {
+ Label []byte
+ Value IStmt
+}
+
+func (n LabelledStmt) String() string {
+ return "Stmt(" + string(n.Label) + " : " + n.Value.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n LabelledStmt) JS() string {
+ return string(n.Label) + ": " + n.Value.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n LabelledStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write(n.Label)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(": "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// ThrowStmt is a throw statement.
+type ThrowStmt struct {
+ Value IExpr
+}
+
+func (n ThrowStmt) String() string {
+ return "Stmt(throw " + n.Value.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ThrowStmt) JS() string {
+ return "throw " + n.Value.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ThrowStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("throw "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// TryStmt is a try statement.
+type TryStmt struct {
+ Body *BlockStmt
+ Binding IBinding // can be nil
+ Catch *BlockStmt // can be nil
+ Finally *BlockStmt // can be nil
+}
+
+func (n TryStmt) String() string {
+ s := "Stmt(try " + n.Body.String()
+ if n.Catch != nil {
+ s += " catch"
+ if n.Binding != nil {
+ s += " Binding(" + n.Binding.String() + ")"
+ }
+ s += " " + n.Catch.String()
+ }
+ if n.Finally != nil {
+ s += " finally " + n.Finally.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n TryStmt) JS() string {
+ s := "try " + n.Body.JS()
+ if n.Catch != nil {
+ s += " catch"
+ if n.Binding != nil {
+ s += "(" + n.Binding.JS() + ")"
+ }
+ s += " " + n.Catch.JS()
+ }
+ if n.Finally != nil {
+ s += " finally " + n.Finally.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n TryStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("try "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Catch != nil {
+ wn, err = w.Write([]byte(" catch"))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Binding != nil {
+ wn, err = w.Write([]byte("("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Binding.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(")"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Catch.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Finally != nil {
+ wn, err = w.Write([]byte(" finally "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Finally.JSWriteTo(w)
+ i += wn
+ }
+ return
+}
+
+// DebuggerStmt is a debugger statement.
+type DebuggerStmt struct {
+}
+
+func (n DebuggerStmt) String() string {
+ return "Stmt(debugger)"
+}
+
+// JS converts the node back to valid JavaScript
+func (n DebuggerStmt) JS() string {
+ return "debugger"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n DebuggerStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("debugger"))
+ i += wn
+ return
+}
+
// Alias is a name space import or import/export specifier for import/export statements.
type Alias struct {
	Name    []byte // can be nil
	Binding []byte // can be nil
}

// String returns "Name as Binding" when a name is present, otherwise just the binding.
func (alias Alias) String() string {
	if alias.Name == nil {
		return string(alias.Binding)
	}
	return string(alias.Name) + " as " + string(alias.Binding)
}

// JS converts the node back to valid JavaScript
func (alias Alias) JS() string {
	// The debug and JS representations of an alias are identical.
	return alias.String()
}

// JS converts the node back to valid JavaScript (writes to io.Writer)
func (alias Alias) JSWriteTo(w io.Writer) (i int, err error) {
	// write appends one chunk, accumulating the byte count; it reports success.
	write := func(b []byte) bool {
		var wn int
		wn, err = w.Write(b)
		i += wn
		return err == nil
	}
	if alias.Name != nil {
		if !write(alias.Name) || !write([]byte(" as ")) {
			return
		}
	}
	write(alias.Binding)
	return
}

// ImportStmt is an import statement.
type ImportStmt struct {
	List    []Alias
	Default []byte // can be nil
	Module  []byte
}

func (n ImportStmt) String() string {
	// The debug representation is the JS form wrapped in Stmt(...); Alias.String
	// and Alias.JS are identical, so reuse JS here.
	return "Stmt(" + n.JS() + ")"
}

// JS converts the node back to valid JavaScript
func (n ImportStmt) JS() string {
	s := "import"
	if n.Default != nil {
		s += " " + string(n.Default)
		if 0 < len(n.List) {
			s += " ,"
		}
	}
	// A single "*" name is a namespace import; otherwise named specifiers go in braces.
	if len(n.List) == 1 && len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' {
		s += " " + n.List[0].JS()
	} else if 0 < len(n.List) {
		s += " {"
		for k, item := range n.List {
			if 0 < k {
				s += " ,"
			}
			if item.Binding != nil {
				s += " " + item.JS()
			}
		}
		s += " }"
	}
	if n.Default != nil || 0 < len(n.List) {
		s += " from"
	}
	return s + " " + string(n.Module)
}

// JS converts the node back to valid JavaScript (writes to io.Writer)
func (n ImportStmt) JSWriteTo(w io.Writer) (i int, err error) {
	write := func(b []byte) bool {
		var wn int
		wn, err = w.Write(b)
		i += wn
		return err == nil
	}
	writeAlias := func(a Alias) bool {
		var wn int
		wn, err = a.JSWriteTo(w)
		i += wn
		return err == nil
	}
	if !write([]byte("import")) {
		return
	}
	if n.Default != nil {
		if !write([]byte(" ")) || !write(n.Default) {
			return
		}
		if len(n.List) != 0 && !write([]byte(" ,")) {
			return
		}
	}
	if len(n.List) == 1 && len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' {
		if !write([]byte(" ")) || !writeAlias(n.List[0]) {
			return
		}
	} else if 0 < len(n.List) {
		if !write([]byte(" {")) {
			return
		}
		for j, item := range n.List {
			if j != 0 && !write([]byte(" ,")) {
				return
			}
			if item.Binding != nil && (!write([]byte(" ")) || !writeAlias(item)) {
				return
			}
		}
		if !write([]byte(" }")) {
			return
		}
	}
	if (n.Default != nil || len(n.List) != 0) && !write([]byte(" from")) {
		return
	}
	if write([]byte(" ")) {
		write(n.Module)
	}
	return
}
+
+// ExportStmt is an export statement.
+type ExportStmt struct {
+ List []Alias
+ Module []byte // can be nil
+ Default bool
+ Decl IExpr
+}
+
+func (n ExportStmt) String() string {
+ s := "Stmt(export"
+ if n.Decl != nil {
+ if n.Default {
+ s += " default"
+ }
+ return s + " " + n.Decl.String() + ")"
+ } else if len(n.List) == 1 && (len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' || n.List[0].Name == nil && len(n.List[0].Binding) == 1 && n.List[0].Binding[0] == '*') {
+ s += " " + n.List[0].String()
+ } else if 0 < len(n.List) {
+ s += " {"
+ for i, item := range n.List {
+ if i != 0 {
+ s += " ,"
+ }
+ if item.Binding != nil {
+ s += " " + item.String()
+ }
+ }
+ s += " }"
+ }
+ if n.Module != nil {
+ s += " from " + string(n.Module)
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ExportStmt) JS() string {
+ s := "export"
+ if n.Decl != nil {
+ if n.Default {
+ s += " default"
+ }
+ return s + " " + n.Decl.JS()
+ } else if len(n.List) == 1 && (len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' || n.List[0].Name == nil && len(n.List[0].Binding) == 1 && n.List[0].Binding[0] == '*') {
+ s += " " + n.List[0].JS()
+ } else if 0 < len(n.List) {
+ s += " {"
+ for i, item := range n.List {
+ if i != 0 {
+ s += " ,"
+ }
+ if item.Binding != nil {
+ s += " " + item.JS()
+ }
+ }
+ s += " }"
+ }
+ if n.Module != nil {
+ s += " from " + string(n.Module)
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ExportStmt) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("export"))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Decl != nil {
+ if n.Default {
+ wn, err = w.Write([]byte(" default"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Decl.JSWriteTo(w)
+ i += wn
+ return
+ } else if len(n.List) == 1 && (len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' || n.List[0].Name == nil && len(n.List[0].Binding) == 1 && n.List[0].Binding[0] == '*') {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.List[0].JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ } else if 0 < len(n.List) {
+ wn, err = w.Write([]byte(" {"))
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(" ,"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if item.Binding != nil {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ }
+ wn, err = w.Write([]byte(" }"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Module != nil {
+ wn, err = w.Write([]byte(" from "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Module)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
// DirectivePrologueStmt is a string literal at the beginning of a function or module (usually "use strict").
type DirectivePrologueStmt struct {
	Value []byte
}

// String returns the debug representation of the directive.
func (n DirectivePrologueStmt) String() string {
	return "Stmt(" + string(n.Value) + ")"
}

// JS converts the node back to valid JavaScript
func (n DirectivePrologueStmt) JS() string {
	return string(n.Value)
}

// JS converts the node back to valid JavaScript (writes to io.Writer)
func (n DirectivePrologueStmt) JSWriteTo(w io.Writer) (i int, err error) {
	i, err = w.Write(n.Value)
	return
}
+
// Marker methods: the empty stmtNode method tags each of these node types as a
// statement node; the interface that requires it is declared elsewhere in this
// file (not visible in this chunk).
func (n BlockStmt) stmtNode() {}
func (n EmptyStmt) stmtNode() {}
func (n ExprStmt) stmtNode() {}
func (n IfStmt) stmtNode() {}
func (n DoWhileStmt) stmtNode() {}
func (n WhileStmt) stmtNode() {}
func (n ForStmt) stmtNode() {}
func (n ForInStmt) stmtNode() {}
func (n ForOfStmt) stmtNode() {}
func (n SwitchStmt) stmtNode() {}
func (n BranchStmt) stmtNode() {}
func (n ReturnStmt) stmtNode() {}
func (n WithStmt) stmtNode() {}
func (n LabelledStmt) stmtNode() {}
func (n ThrowStmt) stmtNode() {}
func (n TryStmt) stmtNode() {}
func (n DebuggerStmt) stmtNode() {}
func (n ImportStmt) stmtNode() {}
func (n ExportStmt) stmtNode() {}
func (n DirectivePrologueStmt) stmtNode() {}
+
+////////////////////////////////////////////////////////////////
+
+// PropertyName is a property name for binding properties, method names, and in object literals.
+type PropertyName struct {
+ Literal LiteralExpr
+ Computed IExpr // can be nil
+}
+
+// IsSet returns true is PropertyName is not nil.
+func (n PropertyName) IsSet() bool {
+ return n.IsComputed() || n.Literal.TokenType != ErrorToken
+}
+
+// IsComputed returns true if PropertyName is computed.
+func (n PropertyName) IsComputed() bool {
+ return n.Computed != nil
+}
+
+// IsIdent returns true if PropertyName equals the given identifier name.
+func (n PropertyName) IsIdent(data []byte) bool {
+ return !n.IsComputed() && n.Literal.TokenType == IdentifierToken && bytes.Equal(data, n.Literal.Data)
+}
+
+func (n PropertyName) String() string {
+ if n.Computed != nil {
+ val := n.Computed.String()
+ if val[0] == '(' {
+ return "[" + val[1:len(val)-1] + "]"
+ }
+ return "[" + val + "]"
+ }
+ return string(n.Literal.Data)
+}
+
+// JS converts the node back to valid JavaScript
+func (n PropertyName) JS() string {
+ if n.Computed != nil {
+ return "[" + n.Computed.JS() + "]"
+ }
+ return string(n.Literal.Data)
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n PropertyName) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Computed != nil {
+ wn, err = w.Write([]byte("["))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Computed.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte("]"))
+ i += wn
+ return
+ }
+ wn, err = w.Write(n.Literal.Data)
+ i += wn
+ return
+}
+
+// BindingArray is an array binding pattern.
+type BindingArray struct {
+ List []BindingElement
+ Rest IBinding // can be nil
+}
+
+func (n BindingArray) String() string {
+ s := "["
+ for i, item := range n.List {
+ if i != 0 {
+ s += ","
+ }
+ s += " " + item.String()
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ s += ","
+ }
+ s += " ...Binding(" + n.Rest.String() + ")"
+ }
+ return s + " ]"
+}
+
+// JS converts the node back to valid JavaScript
+func (n BindingArray) JS() string {
+ s := "["
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.JS()
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ s += ", "
+ }
+ s += "..." + n.Rest.JS()
+ }
+ return s + "]"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BindingArray) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("["))
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Rest.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("]"))
+ i += wn
+ return
+}
+
+// BindingObjectItem is a binding property.
+type BindingObjectItem struct {
+ Key *PropertyName // can be nil
+ Value BindingElement
+}
+
+func (n BindingObjectItem) String() string {
+ s := ""
+ if n.Key != nil {
+ if v, ok := n.Value.Binding.(*Var); !ok || !n.Key.IsIdent(v.Data) {
+ s += " " + n.Key.String() + ":"
+ }
+ }
+ return s + " " + n.Value.String()
+}
+
+// JS converts the node back to valid JavaScript
+func (n BindingObjectItem) JS() string {
+ s := ""
+ if n.Key != nil {
+ if v, ok := n.Value.Binding.(*Var); !ok || !n.Key.IsIdent(v.Data) {
+ s += n.Key.JS() + ": "
+ }
+ }
+ return s + n.Value.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BindingObjectItem) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Key != nil {
+ if v, ok := n.Value.Binding.(*Var); !ok || !n.Key.IsIdent(v.Data) {
+ wn, err = n.Key.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(": "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// BindingObject is an object binding pattern.
+type BindingObject struct {
+ List []BindingObjectItem
+ Rest *Var // can be nil
+}
+
+func (n BindingObject) String() string {
+ s := "{"
+ for i, item := range n.List {
+ if i != 0 {
+ s += ","
+ }
+ s += item.String()
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ s += ","
+ }
+ s += " ...Binding(" + string(n.Rest.Data) + ")"
+ }
+ return s + " }"
+}
+
+// JS converts the node back to valid JavaScript
+func (n BindingObject) JS() string {
+ s := "{"
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.JS()
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ s += ", "
+ }
+ s += "..." + string(n.Rest.Data)
+ }
+ return s + "}"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BindingObject) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("{"))
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Rest.Data)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("}"))
+ i += wn
+ return
+}
+
+// BindingElement is a binding element.
+type BindingElement struct {
+ Binding IBinding // can be nil (in case of ellision)
+ Default IExpr // can be nil
+}
+
+func (n BindingElement) String() string {
+ if n.Binding == nil {
+ return "Binding()"
+ }
+ s := "Binding(" + n.Binding.String()
+ if n.Default != nil {
+ s += " = " + n.Default.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n BindingElement) JS() string {
+ if n.Binding == nil {
+ return ""
+ }
+ s := n.Binding.JS()
+ if n.Default != nil {
+ s += " = " + n.Default.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BindingElement) JSWriteTo(w io.Writer) (i int, err error) {
+ if n.Binding == nil {
+ return
+ }
+ var wn int
+ wn, err = n.Binding.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Default != nil {
+ wn, err = w.Write([]byte(" = "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Default.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
// Marker methods: the empty bindingNode method tags these types as binding
// targets; the interface that requires it is declared elsewhere in this file.
func (v *Var) bindingNode() {}
func (n BindingArray) bindingNode() {}
func (n BindingObject) bindingNode() {}
+
+////////////////////////////////////////////////////////////////
+
+// VarDecl is a variable statement or lexical declaration.
+type VarDecl struct {
+ TokenType
+ List []BindingElement
+ Scope *Scope
+ InFor, InForInOf bool
+}
+
+func (n VarDecl) String() string {
+ s := "Decl(" + n.TokenType.String()
+ for _, item := range n.List {
+ s += " " + item.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n VarDecl) JS() string {
+ s := n.TokenType.String()
+ for i, item := range n.List {
+ if i != 0 {
+ s += ","
+ }
+ s += " " + item.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n VarDecl) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write(n.TokenType.Bytes())
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(","))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Params is a list of parameters for functions, methods, and arrow function.
+type Params struct {
+ List []BindingElement
+ Rest IBinding // can be nil
+}
+
+func (n Params) String() string {
+ s := "Params("
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.String()
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ s += ", "
+ }
+ s += "...Binding(" + n.Rest.String() + ")"
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n Params) JS() string {
+ s := "("
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.JS()
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ s += ", "
+ }
+ s += "..." + n.Rest.JS()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n Params) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("("))
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Rest != nil {
+ if len(n.List) != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Rest.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(")"))
+ i += wn
+ return
+}
+
+// FuncDecl is an (async) (generator) function declaration or expression.
+type FuncDecl struct {
+ Async bool
+ Generator bool
+ Name *Var // can be nil
+ Params Params
+ Body BlockStmt
+}
+
+func (n FuncDecl) String() string {
+ s := "Decl("
+ if n.Async {
+ s += "async function"
+ } else {
+ s += "function"
+ }
+ if n.Generator {
+ s += "*"
+ }
+ if n.Name != nil {
+ s += " " + string(n.Name.Data)
+ }
+ return s + " " + n.Params.String() + " " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n FuncDecl) JS() string {
+ s := ""
+ if n.Async {
+ s += "async function"
+ } else {
+ s += "function"
+ }
+ if n.Generator {
+ s += "*"
+ }
+ if n.Name != nil {
+ s += " " + string(n.Name.Data)
+ }
+ return s + " " + n.Params.JS() + " " + n.Body.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n FuncDecl) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Async {
+ wn, err = w.Write([]byte("async function"))
+ } else {
+ wn, err = w.Write([]byte("function"))
+ }
+ i += wn
+ if err != nil {
+ return
+ }
+
+ if n.Generator {
+ wn, err = w.Write([]byte("*"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Name != nil {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Name.Data)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Params.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// MethodDecl is a method definition in a class declaration.
+type MethodDecl struct {
+ Static bool
+ Async bool
+ Generator bool
+ Get bool
+ Set bool
+ Name PropertyName
+ Params Params
+ Body BlockStmt
+}
+
+func (n MethodDecl) String() string {
+ s := ""
+ if n.Static {
+ s += " static"
+ }
+ if n.Async {
+ s += " async"
+ }
+ if n.Generator {
+ s += " *"
+ }
+ if n.Get {
+ s += " get"
+ }
+ if n.Set {
+ s += " set"
+ }
+ s += " " + n.Name.String() + " " + n.Params.String() + " " + n.Body.String()
+ return "Method(" + s[1:] + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n MethodDecl) JS() string {
+ s := ""
+ if n.Static {
+ s += " static"
+ }
+ if n.Async {
+ s += " async"
+ }
+ if n.Generator {
+ s += " *"
+ }
+ if n.Get {
+ s += " get"
+ }
+ if n.Set {
+ s += " set"
+ }
+ s += " " + n.Name.JS() + " " + n.Params.JS() + " " + n.Body.JS()
+ return s[1:]
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n MethodDecl) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Static {
+ wn, err = w.Write([]byte("static"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Async {
+ if wn > 0 {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("async"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Generator {
+ if wn > 0 {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("*"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Get {
+ if wn > 0 {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("get"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Set {
+ if wn > 0 {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("set"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if wn > 0 {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Name.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Params.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// Field is a field definition in a class declaration.
+type Field struct {
+ Static bool
+ Name PropertyName
+ Init IExpr
+}
+
+func (n Field) String() string {
+ s := "Field("
+ if n.Static {
+ s += "static "
+ }
+ s += n.Name.String()
+ if n.Init != nil {
+ s += " = " + n.Init.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n Field) JS() string {
+ s := ""
+ if n.Static {
+ s += "static "
+ }
+ s += n.Name.String()
+ if n.Init != nil {
+ s += " = " + n.Init.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n Field) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Static {
+ wn, err = w.Write([]byte("static "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Name.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Init != nil {
+ wn, err = w.Write([]byte(" = "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Init.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ClassElement is a class element that is either a static block, a field definition, or a class method
+type ClassElement struct {
+ StaticBlock *BlockStmt // can be nil
+ Method *MethodDecl // can be nil
+ Field
+}
+
+func (n ClassElement) String() string {
+ if n.StaticBlock != nil {
+ return "Static(" + n.StaticBlock.String() + ")"
+ } else if n.Method != nil {
+ return n.Method.String()
+ }
+ return n.Field.String()
+}
+
+// JS converts the node back to valid JavaScript
+func (n ClassElement) JS() string {
+ if n.StaticBlock != nil {
+ return "static " + n.StaticBlock.JS()
+ } else if n.Method != nil {
+ return n.Method.JS()
+ }
+ return n.Field.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ClassElement) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.StaticBlock != nil {
+ wn, err = w.Write([]byte("static "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.StaticBlock.JSWriteTo(w)
+ i += wn
+ return
+ } else if n.Method != nil {
+ wn, err = n.Method.JSWriteTo(w)
+ i += wn
+ return
+ }
+ wn, err = n.Field.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// ClassDecl is a class declaration.
+type ClassDecl struct {
+ Name *Var // can be nil
+ Extends IExpr // can be nil
+ List []ClassElement
+}
+
+func (n ClassDecl) String() string {
+ s := "Decl(class"
+ if n.Name != nil {
+ s += " " + string(n.Name.Data)
+ }
+ if n.Extends != nil {
+ s += " extends " + n.Extends.String()
+ }
+ for _, item := range n.List {
+ s += " " + item.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ClassDecl) JS() string {
+ s := "class"
+ if n.Name != nil {
+ s += " " + string(n.Name.Data)
+ }
+ if n.Extends != nil {
+ s += " extends " + n.Extends.JS()
+ }
+ s += " { "
+ for _, item := range n.List {
+ s += item.JS() + "; "
+ }
+ return s + "}"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ClassDecl) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("class"))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Name != nil {
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Name.Data)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if n.Extends != nil {
+ wn, err = w.Write([]byte(" extends "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Extends.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" { "))
+ i += wn
+ if err != nil {
+ return
+ }
+ for _, item := range n.List {
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte("; "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("}"))
+ i += wn
+ return
+}
+
// Marker methods tagging the declaration types as statement and/or expression nodes.
func (n VarDecl) stmtNode() {}
func (n FuncDecl) stmtNode() {}
func (n ClassDecl) stmtNode() {}

func (n VarDecl) exprNode() {} // not a real IExpr, used for ForInit and ExportDecl
func (n FuncDecl) exprNode() {}
func (n ClassDecl) exprNode() {}
func (n MethodDecl) exprNode() {} // not a real IExpr, used for ObjectExpression PropertyName
+
+////////////////////////////////////////////////////////////////
+
+// LiteralExpr can be this, null, boolean, numeric, string, or regular expression literals.
+type LiteralExpr struct {
+ TokenType
+ Data []byte
+}
+
+func (n LiteralExpr) String() string {
+ return string(n.Data)
+}
+
+// JS converts the node back to valid JavaScript
+func (n LiteralExpr) JS() string {
+ return string(n.Data)
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n LiteralExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write(n.Data)
+ i += wn
+ return
+}
+
+// JSON converts the node back to valid JSON
+func (n LiteralExpr) JSON(buf *bytes.Buffer) error {
+ if n.TokenType == TrueToken || n.TokenType == FalseToken || n.TokenType == NullToken || n.TokenType == DecimalToken {
+ buf.Write(n.Data)
+ return nil
+ } else if n.TokenType == StringToken {
+ data := n.Data
+ if n.Data[0] == '\'' {
+ data = parse.Copy(data)
+ data = bytes.ReplaceAll(data, []byte(`"`), []byte(`\"`))
+ data[0] = '"'
+ data[len(data)-1] = '"'
+ }
+ buf.Write(data)
+ return nil
+ }
+ return ErrInvalidJSON
+}
+
+// Element is an array literal element.
+type Element struct {
+ Value IExpr // can be nil
+ Spread bool
+}
+
+func (n Element) String() string {
+ s := ""
+ if n.Value != nil {
+ if n.Spread {
+ s += "..."
+ }
+ s += n.Value.String()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript
+func (n Element) JS() string {
+ s := ""
+ if n.Value != nil {
+ if n.Spread {
+ s += "..."
+ }
+ s += n.Value.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n Element) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Value != nil {
+ if n.Spread {
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ }
+ return
+}
+
+// ArrayExpr is an array literal.
+type ArrayExpr struct {
+ List []Element
+}
+
+func (n ArrayExpr) String() string {
+ s := "["
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ if item.Value != nil {
+ if item.Spread {
+ s += "..."
+ }
+ s += item.Value.String()
+ }
+ }
+ if 0 < len(n.List) && n.List[len(n.List)-1].Value == nil {
+ s += ","
+ }
+ return s + "]"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ArrayExpr) JS() string {
+ s := "["
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ if item.Value != nil {
+ if item.Spread {
+ s += "..."
+ }
+ s += item.Value.JS()
+ }
+ }
+ if 0 < len(n.List) && n.List[len(n.List)-1].Value == nil {
+ s += ","
+ }
+ return s + "]"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ArrayExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("["))
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ if item.Value != nil {
+ if item.Spread {
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.Value.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ }
+ if 0 < len(n.List) && n.List[len(n.List)-1].Value == nil {
+ wn, err = w.Write([]byte(","))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("]"))
+ i += wn
+ return
+}
+
+// JSON converts the node back to valid JSON
+func (n ArrayExpr) JSON(buf *bytes.Buffer) error {
+ buf.WriteByte('[')
+ for i, item := range n.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ if item.Value == nil || item.Spread {
+ return ErrInvalidJSON
+ }
+ val, ok := item.Value.(JSONer)
+ if !ok {
+ return ErrInvalidJSON
+ } else if err := val.JSON(buf); err != nil {
+ return err
+ }
+ }
+ buf.WriteByte(']')
+ return nil
+}
+
+// Property is a property definition in an object literal.
+type Property struct {
+ // either Name or Spread are set. When Spread is set then Value is AssignmentExpression
+ // if Init is set then Value is IdentifierReference, otherwise it can also be MethodDefinition
+ Name *PropertyName // can be nil
+ Spread bool
+ Value IExpr
+ Init IExpr // can be nil
+}
+
+func (n Property) String() string {
+ s := ""
+ if n.Name != nil {
+ if v, ok := n.Value.(*Var); !ok || !n.Name.IsIdent(v.Data) {
+ s += n.Name.String() + ": "
+ }
+ } else if n.Spread {
+ s += "..."
+ }
+ s += n.Value.String()
+ if n.Init != nil {
+ s += " = " + n.Init.String()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript
+func (n Property) JS() string {
+ s := ""
+ if n.Name != nil {
+ if v, ok := n.Value.(*Var); !ok || !n.Name.IsIdent(v.Data) {
+ s += n.Name.JS() + ": "
+ }
+ } else if n.Spread {
+ s += "..."
+ }
+ s += n.Value.JS()
+ if n.Init != nil {
+ s += " = " + n.Init.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n Property) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Name != nil {
+ if v, ok := n.Value.(*Var); !ok || !n.Name.IsIdent(v.Data) {
+ wn, err = n.Name.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(": "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ } else if n.Spread {
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Init != nil {
+ wn, err = w.Write([]byte(" = "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Init.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// JSON converts the node back to valid JSON
+func (n Property) JSON(buf *bytes.Buffer) error {
+ if n.Name == nil || n.Name.Literal.TokenType != StringToken && n.Name.Literal.TokenType != IdentifierToken || n.Spread || n.Init != nil {
+ return ErrInvalidJSON
+ } else if n.Name.Literal.TokenType == IdentifierToken {
+ buf.WriteByte('"')
+ buf.Write(n.Name.Literal.Data)
+ buf.WriteByte('"')
+ } else {
+ _ = n.Name.Literal.JSON(buf)
+ }
+ buf.WriteString(": ")
+
+ val, ok := n.Value.(JSONer)
+ if !ok {
+ return ErrInvalidJSON
+ } else if err := val.JSON(buf); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ObjectExpr is an object literal.
+type ObjectExpr struct {
+ List []Property
+}
+
+func (n ObjectExpr) String() string {
+ s := "{"
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.String()
+ }
+ return s + "}"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ObjectExpr) JS() string {
+ s := "{"
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.JS()
+ }
+ return s + "}"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ObjectExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("{"))
+ i += wn
+ if err != nil {
+ return
+ }
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte("}"))
+ i += wn
+ return
+}
+
+// JSON converts the node back to valid JSON
+func (n ObjectExpr) JSON(buf *bytes.Buffer) error {
+ buf.WriteByte('{')
+ for i, item := range n.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ if err := item.JSON(buf); err != nil {
+ return err
+ }
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+// TemplatePart is a template head or middle.
+type TemplatePart struct {
+ Value []byte
+ Expr IExpr
+}
+
+func (n TemplatePart) String() string {
+ return string(n.Value) + n.Expr.String()
+}
+
+// JS converts the node back to valid JavaScript
+func (n TemplatePart) JS() string {
+ return string(n.Value) + n.Expr.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n TemplatePart) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write(n.Value)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Expr.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// TemplateExpr is a template literal or member/call expression, super property, or optional chain with template literal.
+type TemplateExpr struct {
+ Tag IExpr // can be nil
+ List []TemplatePart
+ Tail []byte
+ Prec OpPrec
+ Optional bool
+}
+
+func (n TemplateExpr) String() string {
+ s := ""
+ if n.Tag != nil {
+ s += n.Tag.String()
+ if n.Optional {
+ s += "?."
+ }
+ }
+ for _, item := range n.List {
+ s += item.String()
+ }
+ return s + string(n.Tail)
+}
+
+// JS converts the node back to valid JavaScript
+func (n TemplateExpr) JS() string {
+ s := ""
+ if n.Tag != nil {
+ s += n.Tag.JS()
+ if n.Optional {
+ s += "?."
+ }
+ }
+ for _, item := range n.List {
+ s += item.JS()
+ }
+ return s + string(n.Tail)
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n TemplateExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Tag != nil {
+ wn, err = n.Tag.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Optional {
+ wn, err = w.Write([]byte("?."))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ }
+ for _, item := range n.List {
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write(n.Tail)
+ i += wn
+ return
+}
+
+// GroupExpr is a parenthesized expression.
+type GroupExpr struct {
+ X IExpr
+}
+
+func (n GroupExpr) String() string {
+ return "(" + n.X.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n GroupExpr) JS() string {
+ return "(" + n.X.JS() + ")"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n GroupExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(")"))
+ i += wn
+ return
+}
+
+// IndexExpr is a member/call expression, super property, or optional chain with an index expression.
+type IndexExpr struct {
+ X IExpr
+ Y IExpr
+ Prec OpPrec
+ Optional bool
+}
+
+func (n IndexExpr) String() string {
+ if n.Optional {
+ return "(" + n.X.String() + "?.[" + n.Y.String() + "])"
+ }
+ return "(" + n.X.String() + "[" + n.Y.String() + "])"
+}
+
+// JS converts the node back to valid JavaScript
+func (n IndexExpr) JS() string {
+ if n.Optional {
+ return n.X.JS() + "?.[" + n.Y.JS() + "]"
+ }
+ return n.X.JS() + "[" + n.Y.JS() + "]"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n IndexExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Optional {
+ wn, err = w.Write([]byte("?.["))
+ i += wn
+ if err != nil {
+ return
+ }
+ } else {
+ wn, err = w.Write([]byte("["))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Y.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte("]"))
+ i += wn
+ return
+}
+
+// DotExpr is a member/call expression, super property, or optional chain with a dot expression.
+type DotExpr struct {
+ X IExpr
+ Y LiteralExpr
+ Prec OpPrec
+ Optional bool
+}
+
+func (n DotExpr) String() string {
+ if n.Optional {
+ return "(" + n.X.String() + "?." + n.Y.String() + ")"
+ }
+ return "(" + n.X.String() + "." + n.Y.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n DotExpr) JS() string {
+ if n.Optional {
+ return n.X.JS() + "?." + n.Y.JS()
+ }
+ return n.X.JS() + "." + n.Y.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n DotExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Optional {
+ wn, err = w.Write([]byte("?."))
+ i += wn
+ if err != nil {
+ return
+ }
+ } else {
+ wn, err = w.Write([]byte("."))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Y.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// NewTargetExpr is a new target meta property.
+type NewTargetExpr struct {
+}
+
+func (n NewTargetExpr) String() string {
+ return "(new.target)"
+}
+
+// JS converts the node back to valid JavaScript
+func (n NewTargetExpr) JS() string {
+ return "new.target"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n NewTargetExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("new.target"))
+ i += wn
+ return
+}
+
+// ImportMetaExpr is a import meta meta property.
+type ImportMetaExpr struct {
+}
+
+func (n ImportMetaExpr) String() string {
+ return "(import.meta)"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ImportMetaExpr) JS() string {
+ return "import.meta"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ImportMetaExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("import.meta"))
+ i += wn
+ return
+}
+
+type Arg struct {
+ Value IExpr
+ Rest bool
+}
+
+func (n Arg) String() string {
+ s := ""
+ if n.Rest {
+ s += "..."
+ }
+ return s + n.Value.String()
+}
+
+// JS converts the node back to valid JavaScript
+func (n Arg) JS() string {
+ s := ""
+ if n.Rest {
+ s += "..."
+ }
+ return s + n.Value.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n Arg) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Rest {
+ wn, err = w.Write([]byte("..."))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Value.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// Args is a list of arguments as used by new and call expressions.
+type Args struct {
+ List []Arg
+}
+
+func (n Args) String() string {
+ s := "("
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n Args) JS() string {
+ s := ""
+ for i, item := range n.List {
+ if i != 0 {
+ s += ", "
+ }
+ s += item.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n Args) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(", "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// NewExpr is a new expression or new member expression.
+type NewExpr struct {
+ X IExpr
+ Args *Args // can be nil
+}
+
+func (n NewExpr) String() string {
+ if n.Args != nil {
+ return "(new " + n.X.String() + n.Args.String() + ")"
+ }
+ return "(new " + n.X.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n NewExpr) JS() string {
+ if n.Args != nil {
+ return "new " + n.X.JS() + "(" + n.Args.JS() + ")"
+ }
+
+ // always use parentheses to prevent errors when chaining e.g. new Date().getTime()
+ return "new " + n.X.JS() + "()"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n NewExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("new "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Args != nil {
+ wn, err = w.Write([]byte("("))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Args.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(")"))
+ i += wn
+ if err != nil {
+ return
+ }
+ } else {
+ wn, err = w.Write([]byte("()"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// CallExpr is a call expression.
+type CallExpr struct {
+ X IExpr
+ Args Args
+ Optional bool
+}
+
+func (n CallExpr) String() string {
+ if n.Optional {
+ return "(" + n.X.String() + "?." + n.Args.String() + ")"
+ }
+ return "(" + n.X.String() + n.Args.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n CallExpr) JS() string {
+ if n.Optional {
+ return n.X.JS() + "?.(" + n.Args.JS() + ")"
+ }
+ return n.X.JS() + "(" + n.Args.JS() + ")"
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n CallExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.Optional {
+ wn, err = w.Write([]byte("?.("))
+ i += wn
+ if err != nil {
+ return
+ }
+ } else {
+ wn, err = w.Write([]byte("("))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Args.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(")"))
+ i += wn
+ if err != nil {
+ return
+ }
+ return
+}
+
+// UnaryExpr is an update or unary expression.
+type UnaryExpr struct {
+ Op TokenType
+ X IExpr
+}
+
+func (n UnaryExpr) String() string {
+ if n.Op == PostIncrToken || n.Op == PostDecrToken {
+ return "(" + n.X.String() + n.Op.String() + ")"
+ } else if IsIdentifierName(n.Op) {
+ return "(" + n.Op.String() + " " + n.X.String() + ")"
+ }
+ return "(" + n.Op.String() + n.X.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n UnaryExpr) JS() string {
+ if n.Op == PostIncrToken || n.Op == PostDecrToken {
+ return n.X.JS() + n.Op.String()
+ } else if IsIdentifierName(n.Op) {
+ return n.Op.String() + " " + n.X.JS()
+ }
+ return n.Op.String() + n.X.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n UnaryExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Op == PostIncrToken || n.Op == PostDecrToken {
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Op.Bytes())
+ i += wn
+ return
+ } else if IsIdentifierName(n.Op) {
+ wn, err = w.Write(n.Op.Bytes())
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ return
+ }
+ wn, err = w.Write(n.Op.Bytes())
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// JSON converts the node back to valid JSON
+func (n UnaryExpr) JSON(buf *bytes.Buffer) error {
+ if lit, ok := n.X.(*LiteralExpr); ok && n.Op == NegToken && lit.TokenType == DecimalToken {
+ buf.WriteByte('-')
+ buf.Write(lit.Data)
+ return nil
+ }
+ return ErrInvalidJSON
+}
+
+// BinaryExpr is a binary expression.
+type BinaryExpr struct {
+ Op TokenType
+ X, Y IExpr
+}
+
+func (n BinaryExpr) String() string {
+ if IsIdentifierName(n.Op) {
+ return "(" + n.X.String() + " " + n.Op.String() + " " + n.Y.String() + ")"
+ }
+ return "(" + n.X.String() + n.Op.String() + n.Y.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n BinaryExpr) JS() string {
+ return n.X.JS() + " " + n.Op.String() + " " + n.Y.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n BinaryExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write(n.Op.Bytes())
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Y.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// CondExpr is a conditional expression.
+type CondExpr struct {
+ Cond, X, Y IExpr
+}
+
+func (n CondExpr) String() string {
+ return "(" + n.Cond.String() + " ? " + n.X.String() + " : " + n.Y.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n CondExpr) JS() string {
+ return n.Cond.JS() + " ? " + n.X.JS() + " : " + n.Y.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n CondExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = n.Cond.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" ? "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" : "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Y.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// YieldExpr is a yield expression.
+type YieldExpr struct {
+ Generator bool
+ X IExpr // can be nil
+}
+
+func (n YieldExpr) String() string {
+ if n.X == nil {
+ return "(yield)"
+ }
+ s := "(yield"
+ if n.Generator {
+ s += "*"
+ }
+ return s + " " + n.X.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n YieldExpr) JS() string {
+ if n.X == nil {
+ return "yield"
+ }
+ s := "yield"
+ if n.Generator {
+ s += "*"
+ }
+ return s + " " + n.X.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n YieldExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ wn, err = w.Write([]byte("yield"))
+ i += wn
+ if err != nil {
+ return
+ }
+ if n.X == nil {
+ return
+ }
+ if n.Generator {
+ wn, err = w.Write([]byte("*"))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = w.Write([]byte(" "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.X.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// ArrowFunc is an (async) arrow function.
+type ArrowFunc struct {
+ Async bool
+ Params Params
+ Body BlockStmt
+}
+
+func (n ArrowFunc) String() string {
+ s := "("
+ if n.Async {
+ s += "async "
+ }
+ return s + n.Params.String() + " => " + n.Body.String() + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n ArrowFunc) JS() string {
+ s := ""
+ if n.Async {
+ s += "async "
+ }
+ return s + n.Params.JS() + " => " + n.Body.JS()
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n ArrowFunc) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ if n.Async {
+ wn, err = w.Write([]byte("async "))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = n.Params.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = w.Write([]byte(" => "))
+ i += wn
+ if err != nil {
+ return
+ }
+ wn, err = n.Body.JSWriteTo(w)
+ i += wn
+ return
+}
+
+// CommaExpr is a series of comma expressions.
+type CommaExpr struct {
+ List []IExpr
+}
+
+func (n CommaExpr) String() string {
+ s := "("
+ for i, item := range n.List {
+ if i != 0 {
+ s += ","
+ }
+ s += item.String()
+ }
+ return s + ")"
+}
+
+// JS converts the node back to valid JavaScript
+func (n CommaExpr) JS() string {
+ s := ""
+ for i, item := range n.List {
+ if i != 0 {
+ s += ","
+ }
+ s += item.JS()
+ }
+ return s
+}
+
+// JS converts the node back to valid JavaScript (writes to io.Writer)
+func (n CommaExpr) JSWriteTo(w io.Writer) (i int, err error) {
+ var wn int
+ for j, item := range n.List {
+ if j != 0 {
+ wn, err = w.Write([]byte(","))
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ wn, err = item.JSWriteTo(w)
+ i += wn
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (v *Var) exprNode() {}
+func (n LiteralExpr) exprNode() {}
+func (n ArrayExpr) exprNode() {}
+func (n ObjectExpr) exprNode() {}
+func (n TemplateExpr) exprNode() {}
+func (n GroupExpr) exprNode() {}
+func (n DotExpr) exprNode() {}
+func (n IndexExpr) exprNode() {}
+func (n NewTargetExpr) exprNode() {}
+func (n ImportMetaExpr) exprNode() {}
+func (n NewExpr) exprNode() {}
+func (n CallExpr) exprNode() {}
+func (n UnaryExpr) exprNode() {}
+func (n BinaryExpr) exprNode() {}
+func (n CondExpr) exprNode() {}
+func (n YieldExpr) exprNode() {}
+func (n ArrowFunc) exprNode() {}
+func (n CommaExpr) exprNode() {}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/lex.go b/vendor/github.com/tdewolff/parse/v2/js/lex.go
new file mode 100644
index 0000000..7d75bf5
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/lex.go
@@ -0,0 +1,793 @@
+// Package js is an ECMAScript5.1 lexer following the specifications at http://www.ecma-international.org/ecma-262/5.1/.
+package js
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/tdewolff/parse/v2"
+)
+
+var identifierStart = []*unicode.RangeTable{unicode.Lu, unicode.Ll, unicode.Lt, unicode.Lm, unicode.Lo, unicode.Nl, unicode.Other_ID_Start}
+var identifierContinue = []*unicode.RangeTable{unicode.Lu, unicode.Ll, unicode.Lt, unicode.Lm, unicode.Lo, unicode.Nl, unicode.Mn, unicode.Mc, unicode.Nd, unicode.Pc, unicode.Other_ID_Continue}
+
+// IsIdentifierStart returns true if the byte-slice start is the start of an identifier
+func IsIdentifierStart(b []byte) bool {
+ r, _ := utf8.DecodeRune(b)
+ return r == '$' || r == '\\' || r == '_' || unicode.IsOneOf(identifierStart, r)
+}
+
+// IsIdentifierContinue returns true if the byte-slice start is a continuation of an identifier
+func IsIdentifierContinue(b []byte) bool {
+ r, _ := utf8.DecodeRune(b)
+ return r == '$' || r == '\\' || r == '\u200C' || r == '\u200D' || unicode.IsOneOf(identifierContinue, r)
+}
+
+// IsIdentifierEnd returns true if the byte-slice end is a start or continuation of an identifier
+func IsIdentifierEnd(b []byte) bool {
+ r, _ := utf8.DecodeLastRune(b)
+ return r == '$' || r == '\\' || r == '\u200C' || r == '\u200D' || unicode.IsOneOf(identifierContinue, r)
+}
+
+////////////////////////////////////////////////////////////////
+
+// Lexer is the state for the lexer.
+type Lexer struct {
+ r *parse.Input
+ err error
+ prevLineTerminator bool
+ prevNumericLiteral bool
+ level int
+ templateLevels []int
+}
+
+// NewLexer returns a new Lexer for a given io.Reader.
+func NewLexer(r *parse.Input) *Lexer {
+ return &Lexer{
+ r: r,
+ prevLineTerminator: true,
+ level: 0,
+ templateLevels: []int{},
+ }
+}
+
+// Err returns the error encountered during lexing, this is often io.EOF but also other errors can be returned.
+func (l *Lexer) Err() error {
+ if l.err != nil {
+ return l.err
+ }
+ return l.r.Err()
+}
+
+// RegExp reparses the input stream for a regular expression. It is assumed that we just received DivToken or DivEqToken with Next(). This function will go back and read that as a regular expression.
+func (l *Lexer) RegExp() (TokenType, []byte) {
+ if 0 < l.r.Offset() && l.r.Peek(-1) == '/' {
+ l.r.Move(-1)
+ } else if 1 < l.r.Offset() && l.r.Peek(-1) == '=' && l.r.Peek(-2) == '/' {
+ l.r.Move(-2)
+ } else {
+ l.err = parse.NewErrorLexer(l.r, "expected / or /=")
+ return ErrorToken, nil
+ }
+ l.r.Skip() // trick to set start = pos
+
+ if l.consumeRegExpToken() {
+ return RegExpToken, l.r.Shift()
+ }
+ l.err = parse.NewErrorLexer(l.r, "unexpected EOF or newline")
+ return ErrorToken, nil
+}
+
+// Next returns the next Token. It returns ErrorToken when an error was encountered. Using Err() one can retrieve the error message.
+func (l *Lexer) Next() (TokenType, []byte) {
+ prevLineTerminator := l.prevLineTerminator
+ l.prevLineTerminator = false
+
+ prevNumericLiteral := l.prevNumericLiteral
+ l.prevNumericLiteral = false
+
+ // study on 50x jQuery shows:
+ // spaces: 20k
+ // alpha: 16k
+ // newlines: 14.4k
+ // operators: 4k
+ // numbers and dot: 3.6k
+ // (): 3.4k
+ // {}: 1.8k
+ // []: 0.9k
+ // "': 1k
+ // semicolon: 2.4k
+ // colon: 0.8k
+ // comma: 2.4k
+ // slash: 1.4k
+ // `~: almost 0
+
+ c := l.r.Peek(0)
+ switch c {
+ case ' ', '\t', '\v', '\f':
+ l.r.Move(1)
+ for l.consumeWhitespace() {
+ }
+ l.prevLineTerminator = prevLineTerminator
+ return WhitespaceToken, l.r.Shift()
+ case '\n', '\r':
+ l.r.Move(1)
+ for l.consumeLineTerminator() {
+ }
+ l.prevLineTerminator = true
+ return LineTerminatorToken, l.r.Shift()
+ case '>', '=', '!', '+', '*', '%', '&', '|', '^', '~', '?':
+ if tt := l.consumeOperatorToken(); tt != ErrorToken {
+ return tt, l.r.Shift()
+ }
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
+ if tt := l.consumeNumericToken(); tt != ErrorToken || l.r.Pos() != 0 {
+ l.prevNumericLiteral = true
+ return tt, l.r.Shift()
+ } else if c == '.' {
+ l.r.Move(1)
+ if l.r.Peek(0) == '.' && l.r.Peek(1) == '.' {
+ l.r.Move(2)
+ return EllipsisToken, l.r.Shift()
+ }
+ return DotToken, l.r.Shift()
+ }
+ case ',':
+ l.r.Move(1)
+ return CommaToken, l.r.Shift()
+ case ';':
+ l.r.Move(1)
+ return SemicolonToken, l.r.Shift()
+ case '(':
+ l.level++
+ l.r.Move(1)
+ return OpenParenToken, l.r.Shift()
+ case ')':
+ l.level--
+ l.r.Move(1)
+ return CloseParenToken, l.r.Shift()
+ case '/':
+ if tt := l.consumeCommentToken(); tt != ErrorToken {
+ return tt, l.r.Shift()
+ } else if tt := l.consumeOperatorToken(); tt != ErrorToken {
+ return tt, l.r.Shift()
+ }
+ case '{':
+ l.level++
+ l.r.Move(1)
+ return OpenBraceToken, l.r.Shift()
+ case '}':
+ l.level--
+ if len(l.templateLevels) != 0 && l.level == l.templateLevels[len(l.templateLevels)-1] {
+ return l.consumeTemplateToken(), l.r.Shift()
+ }
+ l.r.Move(1)
+ return CloseBraceToken, l.r.Shift()
+ case ':':
+ l.r.Move(1)
+ return ColonToken, l.r.Shift()
+ case '\'', '"':
+ return l.consumeStringToken(), l.r.Shift()
+ case ']':
+ l.r.Move(1)
+ return CloseBracketToken, l.r.Shift()
+ case '[':
+ l.r.Move(1)
+ return OpenBracketToken, l.r.Shift()
+ case '<', '-':
+ if l.consumeHTMLLikeCommentToken(prevLineTerminator) {
+ return CommentToken, l.r.Shift()
+ } else if tt := l.consumeOperatorToken(); tt != ErrorToken {
+ return tt, l.r.Shift()
+ }
+ case '`':
+ l.templateLevels = append(l.templateLevels, l.level)
+ return l.consumeTemplateToken(), l.r.Shift()
+ case '#':
+ l.r.Move(1)
+ if l.consumeIdentifierToken() {
+ return PrivateIdentifierToken, l.r.Shift()
+ }
+ return ErrorToken, nil
+ default:
+ if l.consumeIdentifierToken() {
+ if prevNumericLiteral {
+ l.err = parse.NewErrorLexer(l.r, "unexpected identifier after number")
+ return ErrorToken, nil
+ } else if keyword, ok := Keywords[string(l.r.Lexeme())]; ok {
+ return keyword, l.r.Shift()
+ }
+ return IdentifierToken, l.r.Shift()
+ }
+ if 0xC0 <= c {
+ if l.consumeWhitespace() {
+ for l.consumeWhitespace() {
+ }
+ l.prevLineTerminator = prevLineTerminator
+ return WhitespaceToken, l.r.Shift()
+ } else if l.consumeLineTerminator() {
+ for l.consumeLineTerminator() {
+ }
+ l.prevLineTerminator = true
+ return LineTerminatorToken, l.r.Shift()
+ }
+ } else if c == 0 && l.r.Err() != nil {
+ return ErrorToken, nil
+ }
+ }
+
+ r, _ := l.r.PeekRune(0)
+ l.err = parse.NewErrorLexer(l.r, "unexpected %s", parse.Printable(r))
+ return ErrorToken, l.r.Shift()
+}
+
+////////////////////////////////////////////////////////////////
+
+/*
+The following functions follow the specifications at http://www.ecma-international.org/ecma-262/5.1/
+*/
+
+func (l *Lexer) consumeWhitespace() bool {
+ c := l.r.Peek(0)
+ if c == ' ' || c == '\t' || c == '\v' || c == '\f' {
+ l.r.Move(1)
+ return true
+ } else if 0xC0 <= c {
+ if r, n := l.r.PeekRune(0); r == '\u00A0' || r == '\uFEFF' || unicode.Is(unicode.Zs, r) {
+ l.r.Move(n)
+ return true
+ }
+ }
+ return false
+}
+
+func (l *Lexer) isLineTerminator() bool {
+ c := l.r.Peek(0)
+ if c == '\n' || c == '\r' {
+ return true
+ } else if c == 0xE2 && l.r.Peek(1) == 0x80 && (l.r.Peek(2) == 0xA8 || l.r.Peek(2) == 0xA9) {
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeLineTerminator() bool {
+ c := l.r.Peek(0)
+ if c == '\n' {
+ l.r.Move(1)
+ return true
+ } else if c == '\r' {
+ if l.r.Peek(1) == '\n' {
+ l.r.Move(2)
+ } else {
+ l.r.Move(1)
+ }
+ return true
+ } else if c == 0xE2 && l.r.Peek(1) == 0x80 && (l.r.Peek(2) == 0xA8 || l.r.Peek(2) == 0xA9) {
+ l.r.Move(3)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeDigit() bool {
+ if c := l.r.Peek(0); c >= '0' && c <= '9' {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeHexDigit() bool {
+ if c := l.r.Peek(0); (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeBinaryDigit() bool {
+ if c := l.r.Peek(0); c == '0' || c == '1' {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeOctalDigit() bool {
+ if c := l.r.Peek(0); c >= '0' && c <= '7' {
+ l.r.Move(1)
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeUnicodeEscape() bool {
+ if l.r.Peek(0) != '\\' || l.r.Peek(1) != 'u' {
+ return false
+ }
+ mark := l.r.Pos()
+ l.r.Move(2)
+ if c := l.r.Peek(0); c == '{' {
+ l.r.Move(1)
+ if l.consumeHexDigit() {
+ for l.consumeHexDigit() {
+ }
+ if c := l.r.Peek(0); c == '}' {
+ l.r.Move(1)
+ return true
+ }
+ }
+ l.r.Rewind(mark)
+ return false
+ } else if !l.consumeHexDigit() || !l.consumeHexDigit() || !l.consumeHexDigit() || !l.consumeHexDigit() {
+ l.r.Rewind(mark)
+ return false
+ }
+ return true
+}
+
+func (l *Lexer) consumeSingleLineComment() {
+ for {
+ c := l.r.Peek(0)
+ if c == '\r' || c == '\n' || c == 0 && l.r.Err() != nil {
+ break
+ } else if 0xC0 <= c {
+ if r, _ := l.r.PeekRune(0); r == '\u2028' || r == '\u2029' {
+ break
+ }
+ }
+ l.r.Move(1)
+ }
+}
+
+////////////////////////////////////////////////////////////////
+
+func (l *Lexer) consumeHTMLLikeCommentToken(prevLineTerminator bool) bool {
+ c := l.r.Peek(0)
+ if c == '<' && l.r.Peek(1) == '!' && l.r.Peek(2) == '-' && l.r.Peek(3) == '-' {
+ // opening HTML-style single line comment
+ l.r.Move(4)
+ l.consumeSingleLineComment()
+ return true
+ } else if prevLineTerminator && c == '-' && l.r.Peek(1) == '-' && l.r.Peek(2) == '>' {
+ // closing HTML-style single line comment
+ // (only if current line didn't contain any meaningful tokens)
+ l.r.Move(3)
+ l.consumeSingleLineComment()
+ return true
+ }
+ return false
+}
+
+func (l *Lexer) consumeCommentToken() TokenType {
+ c := l.r.Peek(1)
+ if c == '/' {
+ // single line comment
+ l.r.Move(2)
+ l.consumeSingleLineComment()
+ return CommentToken
+ } else if c == '*' {
+ l.r.Move(2)
+ tt := CommentToken
+ for {
+ c := l.r.Peek(0)
+ if c == '*' && l.r.Peek(1) == '/' {
+ l.r.Move(2)
+ break
+ } else if c == 0 && l.r.Err() != nil {
+ break
+ } else if l.consumeLineTerminator() {
+ l.prevLineTerminator = true
+ tt = CommentLineTerminatorToken
+ } else {
+ l.r.Move(1)
+ }
+ }
+ return tt
+ }
+ return ErrorToken
+}
+
+var opTokens = map[byte]TokenType{
+ '=': EqToken,
+ '!': NotToken,
+ '<': LtToken,
+ '>': GtToken,
+ '+': AddToken,
+ '-': SubToken,
+ '*': MulToken,
+ '/': DivToken,
+ '%': ModToken,
+ '&': BitAndToken,
+ '|': BitOrToken,
+ '^': BitXorToken,
+ '~': BitNotToken,
+ '?': QuestionToken,
+}
+
+var opEqTokens = map[byte]TokenType{
+ '=': EqEqToken,
+ '!': NotEqToken,
+ '<': LtEqToken,
+ '>': GtEqToken,
+ '+': AddEqToken,
+ '-': SubEqToken,
+ '*': MulEqToken,
+ '/': DivEqToken,
+ '%': ModEqToken,
+ '&': BitAndEqToken,
+ '|': BitOrEqToken,
+ '^': BitXorEqToken,
+}
+
+var opOpTokens = map[byte]TokenType{
+ '<': LtLtToken,
+ '+': IncrToken,
+ '-': DecrToken,
+ '*': ExpToken,
+ '&': AndToken,
+ '|': OrToken,
+ '?': NullishToken,
+}
+
+var opOpEqTokens = map[byte]TokenType{
+ '<': LtLtEqToken,
+ '*': ExpEqToken,
+ '&': AndEqToken,
+ '|': OrEqToken,
+ '?': NullishEqToken,
+}
+
+func (l *Lexer) consumeOperatorToken() TokenType {
+ c := l.r.Peek(0)
+ l.r.Move(1)
+ if l.r.Peek(0) == '=' {
+ l.r.Move(1)
+ if l.r.Peek(0) == '=' && (c == '!' || c == '=') {
+ l.r.Move(1)
+ if c == '!' {
+ return NotEqEqToken
+ }
+ return EqEqEqToken
+ }
+ return opEqTokens[c]
+ } else if l.r.Peek(0) == c && (c == '+' || c == '-' || c == '*' || c == '&' || c == '|' || c == '?' || c == '<') {
+ l.r.Move(1)
+ if l.r.Peek(0) == '=' && c != '+' && c != '-' {
+ l.r.Move(1)
+ return opOpEqTokens[c]
+ }
+ return opOpTokens[c]
+ } else if c == '?' && l.r.Peek(0) == '.' && (l.r.Peek(1) < '0' || l.r.Peek(1) > '9') {
+ l.r.Move(1)
+ return OptChainToken
+ } else if c == '=' && l.r.Peek(0) == '>' {
+ l.r.Move(1)
+ return ArrowToken
+ } else if c == '>' && l.r.Peek(0) == '>' {
+ l.r.Move(1)
+ if l.r.Peek(0) == '>' {
+ l.r.Move(1)
+ if l.r.Peek(0) == '=' {
+ l.r.Move(1)
+ return GtGtGtEqToken
+ }
+ return GtGtGtToken
+ } else if l.r.Peek(0) == '=' {
+ l.r.Move(1)
+ return GtGtEqToken
+ }
+ return GtGtToken
+ }
+ return opTokens[c]
+}
+
+func (l *Lexer) consumeIdentifierToken() bool {
+ c := l.r.Peek(0)
+ if identifierStartTable[c] {
+ l.r.Move(1)
+ } else if 0xC0 <= c {
+ if r, n := l.r.PeekRune(0); unicode.IsOneOf(identifierStart, r) {
+ l.r.Move(n)
+ } else {
+ return false
+ }
+ } else if !l.consumeUnicodeEscape() {
+ return false
+ }
+ for {
+ c := l.r.Peek(0)
+ if identifierTable[c] {
+ l.r.Move(1)
+ } else if 0xC0 <= c {
+ if r, n := l.r.PeekRune(0); r == '\u200C' || r == '\u200D' || unicode.IsOneOf(identifierContinue, r) {
+ l.r.Move(n)
+ } else {
+ break
+ }
+ } else if !l.consumeUnicodeEscape() {
+ break
+ }
+ }
+ return true
+}
+
+func (l *Lexer) consumeNumericSeparator(f func() bool) bool {
+ if l.r.Peek(0) != '_' {
+ return false
+ }
+ l.r.Move(1)
+ if !f() {
+ l.r.Move(-1)
+ return false
+ }
+ return true
+}
+
+func (l *Lexer) consumeNumericToken() TokenType {
+ // assume to be on 0 1 2 3 4 5 6 7 8 9 .
+ first := l.r.Peek(0)
+ if first == '0' {
+ l.r.Move(1)
+ if l.r.Peek(0) == 'x' || l.r.Peek(0) == 'X' {
+ l.r.Move(1)
+ if l.consumeHexDigit() {
+ for l.consumeHexDigit() || l.consumeNumericSeparator(l.consumeHexDigit) {
+ }
+ return HexadecimalToken
+ }
+ l.err = parse.NewErrorLexer(l.r, "invalid hexadecimal number")
+ return ErrorToken
+ } else if l.r.Peek(0) == 'b' || l.r.Peek(0) == 'B' {
+ l.r.Move(1)
+ if l.consumeBinaryDigit() {
+ for l.consumeBinaryDigit() || l.consumeNumericSeparator(l.consumeBinaryDigit) {
+ }
+ return BinaryToken
+ }
+ l.err = parse.NewErrorLexer(l.r, "invalid binary number")
+ return ErrorToken
+ } else if l.r.Peek(0) == 'o' || l.r.Peek(0) == 'O' {
+ l.r.Move(1)
+ if l.consumeOctalDigit() {
+ for l.consumeOctalDigit() || l.consumeNumericSeparator(l.consumeOctalDigit) {
+ }
+ return OctalToken
+ }
+ l.err = parse.NewErrorLexer(l.r, "invalid octal number")
+ return ErrorToken
+ } else if l.r.Peek(0) == 'n' {
+ l.r.Move(1)
+ return BigIntToken
+ } else if '0' <= l.r.Peek(0) && l.r.Peek(0) <= '9' {
+ l.err = parse.NewErrorLexer(l.r, "legacy octal numbers are not supported")
+ return ErrorToken
+ }
+ } else if first != '.' {
+ for l.consumeDigit() || l.consumeNumericSeparator(l.consumeDigit) {
+ }
+ }
+ // we have parsed a 0 or an integer number
+ c := l.r.Peek(0)
+ if c == '.' {
+ l.r.Move(1)
+ if l.consumeDigit() {
+ for l.consumeDigit() || l.consumeNumericSeparator(l.consumeDigit) {
+ }
+ c = l.r.Peek(0)
+ } else if first == '.' {
+ // number starts with a dot and must be followed by digits
+ l.r.Move(-1)
+ return ErrorToken // may be dot or ellipsis
+ } else {
+ c = l.r.Peek(0)
+ }
+ } else if c == 'n' {
+ l.r.Move(1)
+ return BigIntToken
+ }
+ if c == 'e' || c == 'E' {
+ l.r.Move(1)
+ c = l.r.Peek(0)
+ if c == '+' || c == '-' {
+ l.r.Move(1)
+ }
+ if !l.consumeDigit() {
+ l.err = parse.NewErrorLexer(l.r, "invalid number")
+ return ErrorToken
+ }
+ for l.consumeDigit() || l.consumeNumericSeparator(l.consumeDigit) {
+ }
+ }
+ return DecimalToken
+}
+
+func (l *Lexer) consumeStringToken() TokenType {
+ // assume to be on ' or "
+ delim := l.r.Peek(0)
+ l.r.Move(1)
+ for {
+ c := l.r.Peek(0)
+ if c == delim {
+ l.r.Move(1)
+ break
+ } else if c == '\\' {
+ l.r.Move(1)
+ if !l.consumeLineTerminator() {
+ if c := l.r.Peek(0); c == delim || c == '\\' {
+ l.r.Move(1)
+ }
+ }
+ continue
+ } else if c == '\n' || c == '\r' || c == 0 && l.r.Err() != nil {
+ l.err = parse.NewErrorLexer(l.r, "unterminated string literal")
+ return ErrorToken
+ }
+ l.r.Move(1)
+ }
+ return StringToken
+}
+
+func (l *Lexer) consumeRegExpToken() bool {
+ // assume to be on /
+ l.r.Move(1)
+ inClass := false
+ for {
+ c := l.r.Peek(0)
+ if !inClass && c == '/' {
+ l.r.Move(1)
+ break
+ } else if c == '[' {
+ inClass = true
+ } else if c == ']' {
+ inClass = false
+ } else if c == '\\' {
+ l.r.Move(1)
+ if l.isLineTerminator() || l.r.Peek(0) == 0 && l.r.Err() != nil {
+ return false
+ }
+ } else if l.isLineTerminator() || c == 0 && l.r.Err() != nil {
+ return false
+ }
+ l.r.Move(1)
+ }
+ // flags
+ for {
+ c := l.r.Peek(0)
+ if identifierTable[c] {
+ l.r.Move(1)
+ } else if 0xC0 <= c {
+ if r, n := l.r.PeekRune(0); r == '\u200C' || r == '\u200D' || unicode.IsOneOf(identifierContinue, r) {
+ l.r.Move(n)
+ } else {
+ break
+ }
+ } else {
+ break
+ }
+ }
+ return true
+}
+
+func (l *Lexer) consumeTemplateToken() TokenType {
+ // assume to be on ` or } when already within template
+ continuation := l.r.Peek(0) == '}'
+ l.r.Move(1)
+ for {
+ c := l.r.Peek(0)
+ if c == '`' {
+ l.templateLevels = l.templateLevels[:len(l.templateLevels)-1]
+ l.r.Move(1)
+ if continuation {
+ return TemplateEndToken
+ }
+ return TemplateToken
+ } else if c == '$' && l.r.Peek(1) == '{' {
+ l.level++
+ l.r.Move(2)
+ if continuation {
+ return TemplateMiddleToken
+ }
+ return TemplateStartToken
+ } else if c == '\\' {
+ l.r.Move(1)
+ if c := l.r.Peek(0); c != 0 {
+ l.r.Move(1)
+ }
+ continue
+ } else if c == 0 && l.r.Err() != nil {
+ l.err = parse.NewErrorLexer(l.r, "unterminated template literal")
+ return ErrorToken
+ }
+ l.r.Move(1)
+ }
+}
+
+var identifierStartTable = [256]bool{
+ // ASCII
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, true, false, false, false, // $
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, true, true, true, true, true, true, true, // A, B, C, D, E, F, G
+ true, true, true, true, true, true, true, true, // H, I, J, K, L, M, N, O
+ true, true, true, true, true, true, true, true, // P, Q, R, S, T, U, V, W
+ true, true, true, false, false, false, false, true, // X, Y, Z, _
+
+ false, true, true, true, true, true, true, true, // a, b, c, d, e, f, g
+ true, true, true, true, true, true, true, true, // h, i, j, k, l, m, n, o
+ true, true, true, true, true, true, true, true, // p, q, r, s, t, u, v, w
+ true, true, true, false, false, false, false, false, // x, y, z
+
+ // non-ASCII
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+}
+
+var identifierTable = [256]bool{
+ // ASCII
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, true, false, false, false, // $
+ false, false, false, false, false, false, false, false,
+ true, true, true, true, true, true, true, true, // 0, 1, 2, 3, 4, 5, 6, 7
+ true, true, false, false, false, false, false, false, // 8, 9
+
+ false, true, true, true, true, true, true, true, // A, B, C, D, E, F, G
+ true, true, true, true, true, true, true, true, // H, I, J, K, L, M, N, O
+ true, true, true, true, true, true, true, true, // P, Q, R, S, T, U, V, W
+ true, true, true, false, false, false, false, true, // X, Y, Z, _
+
+ false, true, true, true, true, true, true, true, // a, b, c, d, e, f, g
+ true, true, true, true, true, true, true, true, // h, i, j, k, l, m, n, o
+ true, true, true, true, true, true, true, true, // p, q, r, s, t, u, v, w
+ true, true, true, false, false, false, false, false, // x, y, z
+
+ // non-ASCII
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/parse.go b/vendor/github.com/tdewolff/parse/v2/js/parse.go
new file mode 100644
index 0000000..3d06237
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/parse.go
@@ -0,0 +1,2292 @@
+package js
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/tdewolff/parse/v2"
+ "github.com/tdewolff/parse/v2/buffer"
+)
+
+type Options struct {
+ WhileToFor bool
+}
+
+// Parser is the state for the parser.
+type Parser struct {
+ l *Lexer
+ o Options
+ err error
+
+ data []byte
+ tt TokenType
+ prevLT bool
+ inFor bool
+ await, yield bool
+ assumeArrowFunc bool
+ allowDirectivePrologue bool
+
+ stmtLevel int
+ exprLevel int
+
+ scope *Scope
+}
+
+// Parse returns a JS AST tree of.
+func Parse(r *parse.Input, o Options) (*AST, error) {
+ ast := &AST{}
+ p := &Parser{
+ l: NewLexer(r),
+ o: o,
+ tt: WhitespaceToken, // trick so that next() works
+ await: true,
+ }
+
+ // process shebang
+ if r.Peek(0) == '#' && r.Peek(1) == '!' {
+ r.Move(2)
+ p.l.consumeSingleLineComment() // consume till end-of-line
+ ast.Comments = append(ast.Comments, r.Shift())
+ }
+
+ p.tt, p.data = p.l.Next()
+ for p.tt == CommentToken || p.tt == CommentLineTerminatorToken {
+ ast.Comments = append(ast.Comments, p.data)
+ p.tt, p.data = p.l.Next()
+ if p.tt == WhitespaceToken || p.tt == LineTerminatorToken {
+ p.tt, p.data = p.l.Next()
+ }
+ }
+ if p.tt == WhitespaceToken || p.tt == LineTerminatorToken {
+ p.next()
+ }
+ // prevLT may be wrong but that is not a problem
+ ast.BlockStmt = p.parseModule()
+
+ if p.err == nil {
+ p.err = p.l.Err()
+ } else {
+ offset := p.l.r.Offset() - len(p.data)
+ p.err = parse.NewError(buffer.NewReader(p.l.r.Bytes()), offset, p.err.Error())
+ }
+ if p.err == io.EOF {
+ p.err = nil
+ }
+ return ast, p.err
+}
+
+////////////////////////////////////////////////////////////////
+
+func (p *Parser) next() {
+ p.prevLT = false
+ p.tt, p.data = p.l.Next()
+ for p.tt == WhitespaceToken || p.tt == LineTerminatorToken || p.tt == CommentToken || p.tt == CommentLineTerminatorToken {
+ if p.tt == LineTerminatorToken || p.tt == CommentLineTerminatorToken {
+ p.prevLT = true
+ }
+ p.tt, p.data = p.l.Next()
+ }
+}
+
+func (p *Parser) failMessage(msg string, args ...interface{}) {
+ if p.err == nil {
+ p.err = fmt.Errorf(msg, args...)
+ p.tt = ErrorToken
+ }
+}
+
+func (p *Parser) fail(in string, expected ...TokenType) {
+ if p.err == nil {
+ msg := "unexpected"
+ if 0 < len(expected) {
+ msg = "expected"
+ for i, tt := range expected[:len(expected)-1] {
+ if 0 < i {
+ msg += ","
+ }
+ msg += " " + tt.String() + ""
+ }
+ if 2 < len(expected) {
+ msg += ", or"
+ } else if 1 < len(expected) {
+ msg += " or"
+ }
+ msg += " " + expected[len(expected)-1].String() + " instead of"
+ }
+
+ if p.tt == ErrorToken {
+ if p.l.Err() == io.EOF {
+ msg += " EOF"
+ } else if lexerErr, ok := p.l.Err().(*parse.Error); ok {
+ msg = lexerErr.Message
+ } else {
+ // does not happen
+ }
+ } else {
+ msg += " " + string(p.data) + ""
+ }
+ if in != "" {
+ msg += " in " + in
+ }
+
+ p.err = errors.New(msg)
+ p.tt = ErrorToken
+ }
+}
+
+func (p *Parser) consume(in string, tt TokenType) bool {
+ if p.tt != tt {
+ p.fail(in, tt)
+ return false
+ }
+ p.next()
+ return true
+}
+
+// TODO: refactor
+//type ScopeState struct {
+// scope *Scope
+// async bool
+// generator bool
+// assumeArrowFunc bool
+//}
+
+func (p *Parser) enterScope(scope *Scope, isFunc bool) *Scope {
+ // create a new scope object and add it to the parent
+ parent := p.scope
+ p.scope = scope
+ *scope = Scope{
+ Parent: parent,
+ }
+ if isFunc {
+ scope.Func = scope
+ } else if parent != nil {
+ scope.Func = parent.Func
+ }
+ return parent
+}
+
+func (p *Parser) exitScope(parent *Scope) {
+ p.scope.HoistUndeclared()
+ p.scope = parent
+}
+
+func (p *Parser) parseModule() (module BlockStmt) {
+ p.enterScope(&module.Scope, true)
+ p.allowDirectivePrologue = true
+ for {
+ switch p.tt {
+ case ErrorToken:
+ return
+ case ImportToken:
+ p.next()
+ if p.tt == OpenParenToken {
+ // could be an import call expression
+ left := &LiteralExpr{ImportToken, []byte("import")}
+ p.exprLevel++
+ suffix := p.parseExpressionSuffix(left, OpExpr, OpCall)
+ p.exprLevel--
+ module.List = append(module.List, &ExprStmt{suffix})
+ } else {
+ importStmt := p.parseImportStmt()
+ module.List = append(module.List, &importStmt)
+ }
+ case ExportToken:
+ exportStmt := p.parseExportStmt()
+ module.List = append(module.List, &exportStmt)
+ default:
+ module.List = append(module.List, p.parseStmt(true))
+ }
+ }
+}
+
+func (p *Parser) parseStmt(allowDeclaration bool) (stmt IStmt) {
+ p.stmtLevel++
+ if 1000 < p.stmtLevel {
+ p.failMessage("too many nested statements")
+ return nil
+ }
+
+ allowDirectivePrologue := p.allowDirectivePrologue
+ p.allowDirectivePrologue = false
+
+ switch tt := p.tt; tt {
+ case OpenBraceToken:
+ stmt = p.parseBlockStmt("block statement")
+ case ConstToken, VarToken:
+ if !allowDeclaration && tt == ConstToken {
+ p.fail("statement")
+ return
+ }
+ p.next()
+ varDecl := p.parseVarDecl(tt, true)
+ stmt = varDecl
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ if tt == ConstToken {
+ p.fail("const declaration")
+ } else {
+ p.fail("var statement")
+ }
+ return
+ }
+ case LetToken:
+ let := p.data
+ p.next()
+ if allowDeclaration && (IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken || p.tt == OpenBracketToken || p.tt == OpenBraceToken) {
+ stmt = p.parseVarDecl(tt, false)
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ p.fail("let declaration")
+ return
+ }
+ } else {
+ // expression
+ stmt = &ExprStmt{p.parseIdentifierExpression(OpExpr, let)}
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ p.fail("expression")
+ return
+ }
+ }
+ case IfToken:
+ p.next()
+ if !p.consume("if statement", OpenParenToken) {
+ return
+ }
+ cond := p.parseExpression(OpExpr)
+ if !p.consume("if statement", CloseParenToken) {
+ return
+ }
+ body := p.parseStmt(false)
+
+ var elseBody IStmt
+ if p.tt == ElseToken {
+ p.next()
+ elseBody = p.parseStmt(false)
+ }
+ stmt = &IfStmt{cond, body, elseBody}
+ case ContinueToken, BreakToken:
+ tt := p.tt
+ p.next()
+ var label []byte
+ if !p.prevLT && p.isIdentifierReference(p.tt) {
+ label = p.data
+ p.next()
+ }
+ stmt = &BranchStmt{tt, label}
+ case ReturnToken:
+ p.next()
+ var value IExpr
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ value = p.parseExpression(OpExpr)
+ }
+ stmt = &ReturnStmt{value}
+ case WithToken:
+ p.next()
+ if !p.consume("with statement", OpenParenToken) {
+ return
+ }
+ cond := p.parseExpression(OpExpr)
+ if !p.consume("with statement", CloseParenToken) {
+ return
+ }
+
+ p.scope.Func.HasWith = true
+ stmt = &WithStmt{cond, p.parseStmt(false)}
+ case DoToken:
+ stmt = &DoWhileStmt{}
+ p.next()
+ body := p.parseStmt(false)
+ if !p.consume("do-while statement", WhileToken) {
+ return
+ }
+ if !p.consume("do-while statement", OpenParenToken) {
+ return
+ }
+ stmt = &DoWhileStmt{p.parseExpression(OpExpr), body}
+ if !p.consume("do-while statement", CloseParenToken) {
+ return
+ }
+ case WhileToken:
+ p.next()
+ if !p.consume("while statement", OpenParenToken) {
+ return
+ }
+ cond := p.parseExpression(OpExpr)
+ if !p.consume("while statement", CloseParenToken) {
+ return
+ }
+ body := p.parseStmt(false)
+ if p.o.WhileToFor {
+ varDecl := &VarDecl{TokenType: VarToken, Scope: p.scope, InFor: true}
+ p.scope.Func.VarDecls = append(p.scope.Func.VarDecls, varDecl)
+
+ block, ok := body.(*BlockStmt)
+ if !ok {
+ block = &BlockStmt{List: []IStmt{body}}
+ }
+ stmt = &ForStmt{varDecl, cond, nil, block}
+ } else {
+ stmt = &WhileStmt{cond, body}
+ }
+ case ForToken:
+ p.next()
+ await := p.await && p.tt == AwaitToken
+ if await {
+ p.next()
+ }
+ if !p.consume("for statement", OpenParenToken) {
+ return
+ }
+
+ body := &BlockStmt{}
+ parent := p.enterScope(&body.Scope, false)
+
+ var init IExpr
+ p.inFor = true
+ if p.tt == VarToken || p.tt == LetToken || p.tt == ConstToken {
+ tt := p.tt
+ p.next()
+ varDecl := p.parseVarDecl(tt, true)
+ if p.tt != SemicolonToken && (1 < len(varDecl.List) || varDecl.List[0].Default != nil) {
+ p.fail("for statement")
+ return
+ } else if p.tt == SemicolonToken && varDecl.List[0].Default == nil {
+ // all but the first item were already verified
+ if _, ok := varDecl.List[0].Binding.(*Var); !ok {
+ p.fail("for statement")
+ return
+ }
+ }
+ init = varDecl
+ } else if p.tt != SemicolonToken {
+ init = p.parseExpression(OpExpr)
+ }
+ p.inFor = false
+
+ if p.tt == SemicolonToken {
+ var cond, post IExpr
+ if await {
+ p.fail("for statement", OfToken)
+ return
+ }
+ p.next()
+ if p.tt != SemicolonToken {
+ cond = p.parseExpression(OpExpr)
+ }
+ if !p.consume("for statement", SemicolonToken) {
+ return
+ }
+ if p.tt != CloseParenToken {
+ post = p.parseExpression(OpExpr)
+ }
+ if !p.consume("for statement", CloseParenToken) {
+ return
+ }
+ p.scope.MarkForStmt()
+ if p.tt == OpenBraceToken {
+ body.List = p.parseStmtList("")
+ } else if p.tt != SemicolonToken {
+ body.List = []IStmt{p.parseStmt(false)}
+ }
+ if init == nil {
+ varDecl := &VarDecl{TokenType: VarToken, Scope: p.scope, InFor: true}
+ p.scope.Func.VarDecls = append(p.scope.Func.VarDecls, varDecl)
+ init = varDecl
+ } else if varDecl, ok := init.(*VarDecl); ok {
+ varDecl.InFor = true
+ }
+ stmt = &ForStmt{init, cond, post, body}
+ } else if p.tt == InToken {
+ if await {
+ p.fail("for statement", OfToken)
+ return
+ }
+ p.next()
+ value := p.parseExpression(OpExpr)
+ if !p.consume("for statement", CloseParenToken) {
+ return
+ }
+ p.scope.MarkForStmt()
+ if p.tt == OpenBraceToken {
+ body.List = p.parseStmtList("")
+ } else if p.tt != SemicolonToken {
+ body.List = []IStmt{p.parseStmt(false)}
+ }
+ if varDecl, ok := init.(*VarDecl); ok {
+ varDecl.InForInOf = true
+ }
+ stmt = &ForInStmt{init, value, body}
+ } else if p.tt == OfToken {
+ p.next()
+ value := p.parseExpression(OpAssign)
+ if !p.consume("for statement", CloseParenToken) {
+ return
+ }
+ p.scope.MarkForStmt()
+ if p.tt == OpenBraceToken {
+ body.List = p.parseStmtList("")
+ } else if p.tt != SemicolonToken {
+ body.List = []IStmt{p.parseStmt(false)}
+ }
+ if varDecl, ok := init.(*VarDecl); ok {
+ varDecl.InForInOf = true
+ }
+ stmt = &ForOfStmt{await, init, value, body}
+ } else {
+ p.fail("for statement", InToken, OfToken, SemicolonToken)
+ return
+ }
+ p.exitScope(parent)
+ case SwitchToken:
+ p.next()
+ if !p.consume("switch statement", OpenParenToken) {
+ return
+ }
+ init := p.parseExpression(OpExpr)
+ if !p.consume("switch statement", CloseParenToken) {
+ return
+ }
+
+ // case block
+ if !p.consume("switch statement", OpenBraceToken) {
+ return
+ }
+
+ switchStmt := &SwitchStmt{Init: init}
+ parent := p.enterScope(&switchStmt.Scope, false)
+ for {
+ if p.tt == ErrorToken {
+ p.fail("switch statement")
+ return
+ } else if p.tt == CloseBraceToken {
+ p.next()
+ break
+ }
+
+ clause := p.tt
+ var list IExpr
+ if p.tt == CaseToken {
+ p.next()
+ list = p.parseExpression(OpExpr)
+ } else if p.tt == DefaultToken {
+ p.next()
+ } else {
+ p.fail("switch statement", CaseToken, DefaultToken)
+ return
+ }
+ if !p.consume("switch statement", ColonToken) {
+ return
+ }
+
+ var stmts []IStmt
+ for p.tt != CaseToken && p.tt != DefaultToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ stmts = append(stmts, p.parseStmt(true))
+ }
+ switchStmt.List = append(switchStmt.List, CaseClause{clause, list, stmts})
+ }
+ p.exitScope(parent)
+ stmt = switchStmt
+ case FunctionToken:
+ if !allowDeclaration {
+ p.fail("statement")
+ return
+ }
+ stmt = p.parseFuncDecl()
+ case AsyncToken: // async function
+ if !allowDeclaration {
+ p.fail("statement")
+ return
+ }
+ async := p.data
+ p.next()
+ if p.tt == FunctionToken && !p.prevLT {
+ stmt = p.parseAsyncFuncDecl()
+ } else {
+ // expression
+ stmt = &ExprStmt{p.parseAsyncExpression(OpExpr, async)}
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ p.fail("expression")
+ return
+ }
+ }
+ case ClassToken:
+ if !allowDeclaration {
+ p.fail("statement")
+ return
+ }
+ stmt = p.parseClassDecl()
+ case ThrowToken:
+ p.next()
+ var value IExpr
+ if !p.prevLT {
+ value = p.parseExpression(OpExpr)
+ }
+ stmt = &ThrowStmt{value}
+ case TryToken:
+ p.next()
+ body := p.parseBlockStmt("try statement")
+ var binding IBinding
+ var catch, finally *BlockStmt
+ if p.tt == CatchToken {
+ p.next()
+ catch = &BlockStmt{}
+ parent := p.enterScope(&catch.Scope, false)
+ if p.tt == OpenParenToken {
+ p.next()
+ binding = p.parseBinding(CatchDecl) // local to block scope of catch
+ if !p.consume("try-catch statement", CloseParenToken) {
+ return
+ }
+ }
+ catch.List = p.parseStmtList("try-catch statement")
+ p.exitScope(parent)
+ } else if p.tt != FinallyToken {
+ p.fail("try statement", CatchToken, FinallyToken)
+ return
+ }
+ if p.tt == FinallyToken {
+ p.next()
+ finally = p.parseBlockStmt("try-finally statement")
+ }
+ stmt = &TryStmt{body, binding, catch, finally}
+ case DebuggerToken:
+ p.next()
+ stmt = &DebuggerStmt{}
+ case SemicolonToken, ErrorToken:
+ stmt = &EmptyStmt{}
+ default:
+ if p.isIdentifierReference(p.tt) {
+ // labelled statement or expression
+ label := p.data
+ p.next()
+ if p.tt == ColonToken {
+ p.next()
+ stmt = &LabelledStmt{label, p.parseStmt(true)} // allows illegal async function, generator function, let, const, or class declarations
+ } else {
+ // expression
+ stmt = &ExprStmt{p.parseIdentifierExpression(OpExpr, label)}
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ p.fail("expression")
+ return
+ }
+ }
+ } else {
+ // expression
+ stmt = &ExprStmt{p.parseExpression(OpExpr)}
+ if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
+ p.fail("expression")
+ return
+ } else if lit, ok := stmt.(*ExprStmt).Value.(*LiteralExpr); ok && allowDirectivePrologue && lit.TokenType == StringToken && len(lit.Data) == 12 && bytes.Equal(lit.Data[1:11], []byte("use strict")) {
+ stmt = &DirectivePrologueStmt{lit.Data}
+ p.allowDirectivePrologue = true
+ }
+ }
+ }
+ if p.tt == SemicolonToken {
+ p.next()
+ }
+ p.stmtLevel--
+ return
+}
+
+func (p *Parser) parseStmtList(in string) (list []IStmt) {
+ if !p.consume(in, OpenBraceToken) {
+ return
+ }
+ for {
+ if p.tt == ErrorToken {
+ p.fail("")
+ return
+ } else if p.tt == CloseBraceToken {
+ p.next()
+ break
+ }
+ list = append(list, p.parseStmt(true))
+ }
+ return
+}
+
+func (p *Parser) parseBlockStmt(in string) (blockStmt *BlockStmt) {
+ blockStmt = &BlockStmt{}
+ parent := p.enterScope(&blockStmt.Scope, false)
+ blockStmt.List = p.parseStmtList(in)
+ p.exitScope(parent)
+ return
+}
+
+func (p *Parser) parseImportStmt() (importStmt ImportStmt) {
+ // assume we're passed import
+ if p.tt == StringToken {
+ importStmt.Module = p.data
+ p.next()
+ } else {
+ if IsIdentifier(p.tt) || p.tt == YieldToken {
+ importStmt.Default = p.data
+ p.next()
+ if p.tt == CommaToken {
+ p.next()
+ }
+ }
+ if p.tt == MulToken {
+ star := p.data
+ p.next()
+ if !p.consume("import statement", AsToken) {
+ return
+ }
+ if !IsIdentifier(p.tt) && p.tt != YieldToken {
+ p.fail("import statement", IdentifierToken)
+ return
+ }
+ importStmt.List = []Alias{Alias{star, p.data}}
+ p.next()
+ } else if p.tt == OpenBraceToken {
+ p.next()
+ for IsIdentifierName(p.tt) || p.tt == StringToken {
+ tt := p.tt
+ var name, binding []byte = nil, p.data
+ p.next()
+ if p.tt == AsToken {
+ p.next()
+ if !IsIdentifier(p.tt) && p.tt != YieldToken {
+ p.fail("import statement", IdentifierToken)
+ return
+ }
+ name = binding
+ binding = p.data
+ p.next()
+ } else if !IsIdentifier(tt) && tt != YieldToken || tt == StringToken {
+ p.fail("import statement", IdentifierToken, StringToken)
+ return
+ }
+ importStmt.List = append(importStmt.List, Alias{name, binding})
+ if p.tt == CommaToken {
+ p.next()
+ if p.tt == CloseBraceToken {
+ importStmt.List = append(importStmt.List, Alias{})
+ break
+ }
+ }
+ }
+ if !p.consume("import statement", CloseBraceToken) {
+ return
+ }
+ }
+ if importStmt.Default == nil && len(importStmt.List) == 0 {
+ p.fail("import statement", StringToken, IdentifierToken, MulToken, OpenBraceToken)
+ return
+ }
+
+ if !p.consume("import statement", FromToken) {
+ return
+ }
+ if p.tt != StringToken {
+ p.fail("import statement", StringToken)
+ return
+ }
+ importStmt.Module = p.data
+ p.next()
+ }
+ if p.tt == SemicolonToken {
+ p.next()
+ }
+ return
+}
+
+func (p *Parser) parseExportStmt() (exportStmt ExportStmt) {
+ // assume we're at export
+ p.next()
+ if p.tt == MulToken || p.tt == OpenBraceToken {
+ if p.tt == MulToken {
+ star := p.data
+ p.next()
+ if p.tt == AsToken {
+ p.next()
+ if !IsIdentifierName(p.tt) && p.tt != StringToken {
+ p.fail("export statement", IdentifierToken, StringToken)
+ return
+ }
+ exportStmt.List = []Alias{Alias{star, p.data}}
+ p.next()
+ } else {
+ exportStmt.List = []Alias{Alias{nil, star}}
+ }
+ if p.tt != FromToken {
+ p.fail("export statement", FromToken)
+ return
+ }
+ } else {
+ p.next()
+ for IsIdentifierName(p.tt) || p.tt == StringToken {
+ var name, binding []byte = nil, p.data
+ p.next()
+ if p.tt == AsToken {
+ p.next()
+ if !IsIdentifierName(p.tt) && p.tt != StringToken {
+ p.fail("export statement", IdentifierToken, StringToken)
+ return
+ }
+ name = binding
+ binding = p.data
+ p.next()
+ }
+ exportStmt.List = append(exportStmt.List, Alias{name, binding})
+ if p.tt == CommaToken {
+ p.next()
+ if p.tt == CloseBraceToken {
+ exportStmt.List = append(exportStmt.List, Alias{})
+ break
+ }
+ }
+ }
+ if !p.consume("export statement", CloseBraceToken) {
+ return
+ }
+ }
+ if p.tt == FromToken {
+ p.next()
+ if p.tt != StringToken {
+ p.fail("export statement", StringToken)
+ return
+ }
+ exportStmt.Module = p.data
+ p.next()
+ }
+ } else if p.tt == VarToken || p.tt == ConstToken || p.tt == LetToken {
+ tt := p.tt
+ p.next()
+ exportStmt.Decl = p.parseVarDecl(tt, false)
+ } else if p.tt == FunctionToken {
+ exportStmt.Decl = p.parseFuncDecl()
+ } else if p.tt == AsyncToken { // async function
+ p.next()
+ if p.tt != FunctionToken || p.prevLT {
+ p.fail("export statement", FunctionToken)
+ return
+ }
+ exportStmt.Decl = p.parseAsyncFuncDecl()
+ } else if p.tt == ClassToken {
+ exportStmt.Decl = p.parseClassDecl()
+ } else if p.tt == DefaultToken {
+ exportStmt.Default = true
+ p.next()
+ if p.tt == FunctionToken {
+ exportStmt.Decl = p.parseFuncDeclDefault()
+ } else if p.tt == AsyncToken { // async function or async arrow function
+ async := p.data
+ p.next()
+ if p.tt == FunctionToken && !p.prevLT {
+ exportStmt.Decl = p.parseAsyncFuncDeclDefault()
+ } else {
+ // expression
+ exportStmt.Decl = p.parseAsyncExpression(OpExpr, async)
+ }
+ } else if p.tt == ClassToken {
+ exportStmt.Decl = p.parseClassDeclDefault()
+ } else {
+ exportStmt.Decl = p.parseExpression(OpAssign)
+ }
+ } else {
+ p.fail("export statement", MulToken, OpenBraceToken, VarToken, LetToken, ConstToken, FunctionToken, AsyncToken, ClassToken, DefaultToken)
+ return
+ }
+ if p.tt == SemicolonToken {
+ p.next()
+ }
+ return
+}
+
+func (p *Parser) parseVarDecl(tt TokenType, canBeHoisted bool) (varDecl *VarDecl) {
+ // assume we're past var, let or const
+ varDecl = &VarDecl{
+ TokenType: tt,
+ Scope: p.scope,
+ }
+ declType := LexicalDecl
+ if tt == VarToken {
+ declType = VariableDecl
+ if canBeHoisted {
+ p.scope.Func.VarDecls = append(p.scope.Func.VarDecls, varDecl)
+ }
+ }
+ for {
+ // binding element, var declaration in for-in or for-of can never have a default
+ var bindingElement BindingElement
+ parentInFor := p.inFor
+ p.inFor = false
+ bindingElement.Binding = p.parseBinding(declType)
+ p.inFor = parentInFor
+ if p.tt == EqToken {
+ p.next()
+ bindingElement.Default = p.parseExpression(OpAssign)
+ } else if _, ok := bindingElement.Binding.(*Var); !ok && (!p.inFor || 0 < len(varDecl.List)) {
+ p.fail("var statement", EqToken)
+ return
+ } else if tt == ConstToken && (!p.inFor || p.inFor && p.tt != OfToken && p.tt != InToken) {
+ p.fail("const statement", EqToken)
+ }
+
+ varDecl.List = append(varDecl.List, bindingElement)
+ if p.tt == CommaToken {
+ p.next()
+ } else {
+ break
+ }
+ }
+ return
+}
+
+func (p *Parser) parseFuncParams(in string) (params Params) {
+ if !p.consume(in, OpenParenToken) {
+ return
+ }
+
+ for p.tt != CloseParenToken && p.tt != ErrorToken {
+ if p.tt == EllipsisToken {
+ // binding rest element
+ p.next()
+ params.Rest = p.parseBinding(ArgumentDecl)
+ p.consume(in, CloseParenToken)
+ return
+ }
+ params.List = append(params.List, p.parseBindingElement(ArgumentDecl))
+ if p.tt != CommaToken {
+ break
+ }
+ p.next()
+ }
+ if p.tt != CloseParenToken {
+ p.fail(in)
+ return
+ }
+ p.next()
+
+ // mark undeclared vars as arguments in `function f(a=b){var b}` where the b's are different vars
+ p.scope.MarkFuncArgs()
+ return
+}
+
+func (p *Parser) parseFuncDecl() (funcDecl *FuncDecl) {
+ return p.parseAnyFunc(false, false, false)
+}
+
+func (p *Parser) parseFuncDeclDefault() (funcDecl *FuncDecl) {
+ return p.parseAnyFunc(false, true, false)
+}
+
+func (p *Parser) parseAsyncFuncDecl() (funcDecl *FuncDecl) {
+ return p.parseAnyFunc(true, false, false)
+}
+
+func (p *Parser) parseAsyncFuncDeclDefault() (funcDecl *FuncDecl) {
+ return p.parseAnyFunc(true, true, false)
+}
+
+func (p *Parser) parseFuncExpr() (funcDecl *FuncDecl) {
+ return p.parseAnyFunc(false, false, true)
+}
+
+func (p *Parser) parseAsyncFuncExpr() (funcDecl *FuncDecl) {
+ return p.parseAnyFunc(true, false, true)
+}
+
+func (p *Parser) parseAnyFunc(async, exportDefault, expr bool) (funcDecl *FuncDecl) {
+ // assume we're at function
+ p.next()
+ funcDecl = &FuncDecl{}
+ funcDecl.Async = async
+ funcDecl.Generator = p.tt == MulToken
+ if funcDecl.Generator {
+ p.next()
+ }
+ var ok bool
+ var name []byte
+ if expr && (IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken) || !expr && p.isIdentifierReference(p.tt) {
+ name = p.data
+ if !expr {
+ funcDecl.Name, ok = p.scope.Declare(FunctionDecl, p.data)
+ if !ok {
+ p.failMessage("identifier %s has already been declared", string(p.data))
+ return
+ }
+ }
+ p.next()
+ } else if !expr && !exportDefault {
+ p.fail("function declaration", IdentifierToken)
+ return
+ } else if p.tt != OpenParenToken {
+ p.fail("function declaration", IdentifierToken, OpenParenToken)
+ return
+ }
+ parent := p.enterScope(&funcDecl.Body.Scope, true)
+ parentAwait, parentYield := p.await, p.yield
+ p.await, p.yield = funcDecl.Async, funcDecl.Generator
+
+ if expr && name != nil {
+ funcDecl.Name, _ = p.scope.Declare(ExprDecl, name) // cannot fail
+ }
+ funcDecl.Params = p.parseFuncParams("function declaration")
+ p.allowDirectivePrologue = true
+ funcDecl.Body.List = p.parseStmtList("function declaration")
+
+ p.await, p.yield = parentAwait, parentYield
+ p.exitScope(parent)
+ return
+}
+
+func (p *Parser) parseClassDecl() (classDecl *ClassDecl) {
+ return p.parseAnyClass(false, false)
+}
+
+func (p *Parser) parseClassDeclDefault() (classDecl *ClassDecl) {
+ return p.parseAnyClass(true, false)
+}
+
+func (p *Parser) parseClassExpr() (classDecl *ClassDecl) {
+ return p.parseAnyClass(false, true)
+}
+
+func (p *Parser) parseAnyClass(exportDefault, expr bool) (classDecl *ClassDecl) {
+ // assume we're at class
+ p.next()
+ classDecl = &ClassDecl{}
+ if IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken {
+ if !expr {
+ var ok bool
+ classDecl.Name, ok = p.scope.Declare(LexicalDecl, p.data)
+ if !ok {
+ p.failMessage("identifier %s has already been declared", string(p.data))
+ return
+ }
+ } else {
+ //classDecl.Name, ok = p.scope.Declare(ExprDecl, p.data) // classes do not register vars
+ classDecl.Name = &Var{p.data, nil, 1, ExprDecl}
+ }
+ p.next()
+ } else if !expr && !exportDefault {
+ p.fail("class declaration", IdentifierToken)
+ return
+ }
+ if p.tt == ExtendsToken {
+ p.next()
+ classDecl.Extends = p.parseExpression(OpLHS)
+ }
+
+ if !p.consume("class declaration", OpenBraceToken) {
+ return
+ }
+ for {
+ if p.tt == ErrorToken {
+ p.fail("class declaration")
+ return
+ } else if p.tt == SemicolonToken {
+ p.next()
+ continue
+ } else if p.tt == CloseBraceToken {
+ p.next()
+ break
+ }
+
+ classDecl.List = append(classDecl.List, p.parseClassElement())
+ }
+ return
+}
+
+func (p *Parser) parseClassElement() ClassElement {
+ method := &MethodDecl{}
+ var data []byte // either static, async, get, or set
+ if p.tt == StaticToken {
+ method.Static = true
+ data = p.data
+ p.next()
+ if p.tt == OpenBraceToken {
+ return ClassElement{StaticBlock: p.parseBlockStmt("class static block")}
+ }
+ }
+ if p.tt == MulToken {
+ method.Generator = true
+ p.next()
+ } else if p.tt == AsyncToken {
+ data = p.data
+ p.next()
+ if !p.prevLT {
+ method.Async = true
+ if p.tt == MulToken {
+ method.Generator = true
+ data = nil
+ p.next()
+ }
+ }
+ } else if p.tt == GetToken {
+ method.Get = true
+ data = p.data
+ p.next()
+ } else if p.tt == SetToken {
+ method.Set = true
+ data = p.data
+ p.next()
+ }
+
+ isField := false
+ if data != nil && p.tt == OpenParenToken {
+ // (static) method name is: static, async, get, or set
+ method.Name.Literal = LiteralExpr{IdentifierToken, data}
+ if method.Async || method.Get || method.Set {
+ method.Async = false
+ method.Get = false
+ method.Set = false
+ } else {
+ method.Static = false
+ }
+ } else if data != nil && (p.tt == EqToken || p.tt == SemicolonToken || p.tt == CloseBraceToken) {
+ // (static) field name is: static, async, get, or set
+ method.Name.Literal = LiteralExpr{IdentifierToken, data}
+ if !method.Async && !method.Get && !method.Set {
+ method.Static = false
+ }
+ isField = true
+ } else {
+ if p.tt == PrivateIdentifierToken {
+ method.Name.Literal = LiteralExpr{p.tt, p.data}
+ p.next()
+ } else {
+ method.Name = p.parsePropertyName("method or field definition")
+ }
+ if (data == nil || method.Static) && p.tt != OpenParenToken {
+ isField = true
+ }
+ }
+
+ if isField {
+ var init IExpr
+ if p.tt == EqToken {
+ p.next()
+ init = p.parseExpression(OpAssign)
+ }
+ return ClassElement{Field: Field{Static: method.Static, Name: method.Name, Init: init}}
+ }
+
+ parent := p.enterScope(&method.Body.Scope, true)
+ parentAwait, parentYield := p.await, p.yield
+ p.await, p.yield = method.Async, method.Generator
+
+ method.Params = p.parseFuncParams("method definition")
+ p.allowDirectivePrologue = true
+ method.Body.List = p.parseStmtList("method definition")
+
+ p.await, p.yield = parentAwait, parentYield
+ p.exitScope(parent)
+ return ClassElement{Method: method}
+}
+
+func (p *Parser) parsePropertyName(in string) (propertyName PropertyName) {
+ if IsIdentifierName(p.tt) {
+ propertyName.Literal = LiteralExpr{IdentifierToken, p.data}
+ p.next()
+ } else if p.tt == StringToken {
+ // reinterpret string as identifier or number if we can, except for empty strings
+ if isIdent := AsIdentifierName(p.data[1 : len(p.data)-1]); isIdent {
+ propertyName.Literal = LiteralExpr{IdentifierToken, p.data[1 : len(p.data)-1]}
+ } else if isNum := AsDecimalLiteral(p.data[1 : len(p.data)-1]); isNum {
+ propertyName.Literal = LiteralExpr{DecimalToken, p.data[1 : len(p.data)-1]}
+ } else {
+ propertyName.Literal = LiteralExpr{p.tt, p.data}
+ }
+ p.next()
+ } else if IsNumeric(p.tt) {
+ propertyName.Literal = LiteralExpr{p.tt, p.data}
+ p.next()
+ } else if p.tt == OpenBracketToken {
+ p.next()
+ propertyName.Computed = p.parseExpression(OpAssign)
+ if !p.consume(in, CloseBracketToken) {
+ return
+ }
+ } else {
+ p.fail(in, IdentifierToken, StringToken, NumericToken, OpenBracketToken)
+ return
+ }
+ return
+}
+
+func (p *Parser) parseBindingElement(decl DeclType) (bindingElement BindingElement) {
+ // binding element
+ bindingElement.Binding = p.parseBinding(decl)
+ if p.tt == EqToken {
+ p.next()
+ bindingElement.Default = p.parseExpression(OpAssign)
+ }
+ return
+}
+
// parseBinding parses a binding target: a binding identifier, an array
// binding pattern, or an object binding pattern. Identifiers are declared
// in the current scope with the given declaration type; a duplicate
// declaration is a parse error.
func (p *Parser) parseBinding(decl DeclType) (binding IBinding) {
	// binding identifier or binding pattern
	if p.isIdentifierReference(p.tt) {
		var ok bool
		binding, ok = p.scope.Declare(decl, p.data)
		if !ok {
			p.failMessage("identifier %s has already been declared", string(p.data))
			return
		}
		p.next()
	} else if p.tt == OpenBracketToken {
		p.next()
		array := BindingArray{}
		if p.tt == CommaToken {
			// a comma directly after [ is an elision: an empty element
			array.List = append(array.List, BindingElement{})
		}
		// last tracks the list length up to the last non-elided element so
		// that trailing elisions can be dropped below
		last := 0
		for p.tt != CloseBracketToken {
			// elision
			for p.tt == CommaToken {
				p.next()
				if p.tt == CommaToken {
					array.List = append(array.List, BindingElement{})
				}
			}
			// binding rest element
			if p.tt == EllipsisToken {
				p.next()
				array.Rest = p.parseBinding(decl)
				if p.tt != CloseBracketToken {
					// the rest element must be the last item
					p.fail("array binding pattern", CloseBracketToken)
					return
				}
				break
			} else if p.tt == CloseBracketToken {
				// drop trailing elided elements: [a,,] binds only a
				array.List = array.List[:last]
				break
			}

			array.List = append(array.List, p.parseBindingElement(decl))
			last = len(array.List)

			if p.tt != CommaToken && p.tt != CloseBracketToken {
				p.fail("array binding pattern", CommaToken, CloseBracketToken)
				return
			}
		}
		p.next() // always CloseBracketToken
		binding = &array
	} else if p.tt == OpenBraceToken {
		p.next()
		object := BindingObject{}
		for p.tt != CloseBraceToken {
			// binding rest property
			if p.tt == EllipsisToken {
				p.next()
				if !p.isIdentifierReference(p.tt) {
					// rest property must be a plain identifier, not a pattern
					p.fail("object binding pattern", IdentifierToken)
					return
				}
				var ok bool
				object.Rest, ok = p.scope.Declare(decl, p.data)
				if !ok {
					p.failMessage("identifier %s has already been declared", string(p.data))
					return
				}
				p.next()
				if p.tt != CloseBraceToken {
					// the rest property must be the last item
					p.fail("object binding pattern", CloseBraceToken)
					return
				}
				break
			}

			item := BindingObjectItem{}
			if p.isIdentifierReference(p.tt) {
				name := p.data
				item.Key = &PropertyName{LiteralExpr{IdentifierToken, p.data}, nil}
				p.next()
				if p.tt == ColonToken {
					// property name + : + binding element
					p.next()
					item.Value = p.parseBindingElement(decl)
				} else {
					// single name binding
					var ok bool
					item.Key.Literal.Data = parse.Copy(item.Key.Literal.Data) // copy so that renaming doesn't rename the key
					item.Value.Binding, ok = p.scope.Declare(decl, name)
					if !ok {
						p.failMessage("identifier %s has already been declared", string(name))
						return
					}
					if p.tt == EqToken {
						p.next()
						item.Value.Default = p.parseExpression(OpAssign)
					}
				}
			} else {
				// computed, string, or numeric property name followed by : binding element
				propertyName := p.parsePropertyName("object binding pattern")
				item.Key = &propertyName
				if !p.consume("object binding pattern", ColonToken) {
					return
				}
				item.Value = p.parseBindingElement(decl)
			}
			object.List = append(object.List, item)

			if p.tt == CommaToken {
				p.next()
			} else if p.tt != CloseBraceToken {
				p.fail("object binding pattern", CommaToken, CloseBraceToken)
				return
			}
		}
		p.next() // always CloseBraceToken
		binding = &object
	} else {
		p.fail("binding")
		return
	}
	return
}
+
// parseArrayLiteral parses an array literal, handling elisions (empty
// elements between commas) and spread elements.
func (p *Parser) parseArrayLiteral() (array ArrayExpr) {
	// assume we're on [
	p.next()
	// prevComma is true when the previous token was a comma or the opening
	// bracket, so that a comma seen now denotes an elided (empty) element
	prevComma := true
	for {
		if p.tt == ErrorToken {
			p.fail("expression")
			return
		} else if p.tt == CloseBracketToken {
			p.next()
			break
		} else if p.tt == CommaToken {
			if prevComma {
				array.List = append(array.List, Element{})
			}
			prevComma = true
			p.next()
		} else {
			spread := p.tt == EllipsisToken
			if spread {
				p.next()
			}
			array.List = append(array.List, Element{p.parseAssignmentExpression(), spread})
			prevComma = false
			if spread && p.tt != CloseBracketToken {
				// a rest element must be last if this turns out to be an
				// arrow function parameter list
				p.assumeArrowFunc = false
			}
		}
	}
	return
}
+
// parseObjectLiteral parses an object literal: spread properties, method
// definitions (including async/generator/getter/setter), key:value pairs,
// and shorthand identifier properties (with optional cover-grammar
// initializers for arrow function parameter lists).
func (p *Parser) parseObjectLiteral() (object ObjectExpr) {
	// assume we're on {
	p.next()
	for {
		if p.tt == ErrorToken {
			p.fail("object literal", CloseBraceToken)
			return
		} else if p.tt == CloseBraceToken {
			p.next()
			break
		}

		property := Property{}
		if p.tt == EllipsisToken {
			p.next()
			property.Spread = true
			property.Value = p.parseAssignmentExpression()
			if _, isIdent := property.Value.(*Var); !isIdent || p.tt != CloseBraceToken {
				// only a trailing ...identifier can be an arrow function rest parameter
				p.assumeArrowFunc = false
			}
		} else {
			// try to parse as MethodDefinition, otherwise fall back to PropertyName:AssignExpr or IdentifierReference
			// data holds the async/get/set keyword bytes in case it turns
			// out to be a plain property name rather than a modifier
			var data []byte
			method := MethodDecl{}
			if p.tt == MulToken {
				p.next()
				method.Generator = true
			} else if p.tt == AsyncToken {
				data = p.data
				p.next()
				if !p.prevLT {
					method.Async = true
					if p.tt == MulToken {
						p.next()
						method.Generator = true
						data = nil
					}
				} else {
					// async [newline] is a property named async, not a modifier
					method.Name.Literal = LiteralExpr{IdentifierToken, data}
					data = nil
				}
			} else if p.tt == GetToken {
				data = p.data
				p.next()
				method.Get = true
			} else if p.tt == SetToken {
				data = p.data
				p.next()
				method.Set = true
			}

			// PropertyName
			if data != nil && !method.Generator && (p.tt == EqToken || p.tt == CommaToken || p.tt == CloseBraceToken || p.tt == ColonToken || p.tt == OpenParenToken) {
				// the async/get/set keyword was the property name itself
				method.Name.Literal = LiteralExpr{IdentifierToken, data}
				method.Async = false
				method.Get = false
				method.Set = false
			} else if !method.Name.IsSet() { // did not parse async [LT]
				method.Name = p.parsePropertyName("object literal")
				if !method.Name.IsSet() {
					return
				}
			}

			if p.tt == OpenParenToken {
				// MethodDefinition
				parent := p.enterScope(&method.Body.Scope, true)
				parentAwait, parentYield := p.await, p.yield
				p.await, p.yield = method.Async, method.Generator

				method.Params = p.parseFuncParams("method definition")
				method.Body.List = p.parseStmtList("method definition")

				p.await, p.yield = parentAwait, parentYield
				p.exitScope(parent)
				property.Value = &method
				p.assumeArrowFunc = false
			} else if p.tt == ColonToken {
				// PropertyName : AssignmentExpression
				p.next()
				property.Name = &method.Name
				property.Value = p.parseAssignmentExpression()
			} else if method.Name.IsComputed() || !p.isIdentifierReference(method.Name.Literal.TokenType) {
				p.fail("object literal", ColonToken, OpenParenToken)
				return
			} else {
				// IdentifierReference (= AssignmentExpression)?
				name := method.Name.Literal.Data
				method.Name.Literal.Data = parse.Copy(method.Name.Literal.Data) // copy so that renaming doesn't rename the key
				property.Name = &method.Name // set key explicitly so after renaming the original is still known
				if p.assumeArrowFunc {
					var ok bool
					property.Value, ok = p.scope.Declare(ArgumentDecl, name)
					if !ok {
						property.Value = p.scope.Use(name)
						p.assumeArrowFunc = false
					}
				} else {
					property.Value = p.scope.Use(name)
				}
				if p.tt == EqToken {
					// cover-grammar initializer, only valid in a binding pattern
					p.next()
					parentAssumeArrowFunc := p.assumeArrowFunc
					p.assumeArrowFunc = false
					property.Init = p.parseExpression(OpAssign)
					p.assumeArrowFunc = parentAssumeArrowFunc
				}
			}
		}
		object.List = append(object.List, property)
		if p.tt == CommaToken {
			p.next()
		} else if p.tt != CloseBraceToken {
			p.fail("object literal")
			return
		}
	}
	return
}
+
+func (p *Parser) parseTemplateLiteral(precLeft OpPrec) (template TemplateExpr) {
+ // assume we're on 'Template' or 'TemplateStart'
+ template.Prec = OpMember
+ if precLeft < OpMember {
+ template.Prec = OpCall
+ }
+ for p.tt == TemplateStartToken || p.tt == TemplateMiddleToken {
+ tpl := p.data
+ p.next()
+ template.List = append(template.List, TemplatePart{tpl, p.parseExpression(OpExpr)})
+ }
+ if p.tt != TemplateToken && p.tt != TemplateEndToken {
+ p.fail("template literal", TemplateToken)
+ return
+ }
+ template.Tail = p.data
+ p.next() // TemplateEndToken
+ return
+}
+
+func (p *Parser) parseArguments() (args Args) {
+ // assume we're on (
+ p.next()
+ args.List = make([]Arg, 0, 4)
+ for {
+ rest := p.tt == EllipsisToken
+ if rest {
+ p.next()
+ }
+
+ if p.tt == CloseParenToken || p.tt == ErrorToken {
+ break
+ }
+ args.List = append(args.List, Arg{
+ Value: p.parseExpression(OpAssign),
+ Rest: rest,
+ })
+ if p.tt == CommaToken {
+ p.next()
+ }
+ }
+ p.consume("arguments", CloseParenToken)
+ return
+}
+
// parseAsyncArrowFunc parses an async arrow function starting at its
// parameter list, which is either a single identifier or a parenthesized
// parameter list. A new function scope is entered for the parameters and body.
func (p *Parser) parseAsyncArrowFunc() (arrowFunc *ArrowFunc) {
	// expect we're at Identifier or Yield or (
	arrowFunc = &ArrowFunc{}
	parent := p.enterScope(&arrowFunc.Body.Scope, true)
	parentAwait, parentYield := p.await, p.yield
	// parameters are parsed in an await context, outside any generator
	p.await, p.yield = true, false

	if IsIdentifier(p.tt) || !p.yield && p.tt == YieldToken {
		ref, _ := p.scope.Declare(ArgumentDecl, p.data) // cannot fail
		p.next()
		arrowFunc.Params.List = []BindingElement{{Binding: ref}}
	} else {
		arrowFunc.Params = p.parseFuncParams("arrow function")

		// could be CallExpression of: async(params)
		if p.tt != ArrowToken {
			// TODO?
		}
	}

	// the body keeps the await context but restores the caller's yield state
	p.await, p.yield = true, parentYield
	arrowFunc.Async = true
	arrowFunc.Body.List = p.parseArrowFuncBody()

	p.await, p.yield = parentAwait, parentYield
	p.exitScope(parent)
	return
}
+
// parseIdentifierArrowFunc parses an arrow function of the form
// identifier => body, where v is the variable already parsed (and used) for
// the identifier. The variable is converted into the single argument of the
// arrow function.
func (p *Parser) parseIdentifierArrowFunc(v *Var) (arrowFunc *ArrowFunc) {
	// expect we're at =>
	arrowFunc = &ArrowFunc{}
	parent := p.enterScope(&arrowFunc.Body.Scope, true)
	parentAwait, parentYield := p.await, p.yield

	if 1 < v.Uses {
		// the variable is also used elsewhere; declare a fresh argument
		// variable with a copied name and release one use of the original
		v.Uses--
		v, _ = p.scope.Declare(ArgumentDecl, parse.Copy(v.Data)) // cannot fail
	} else {
		// if v.Uses==1 it must be undeclared and be the last added
		p.scope.Parent.Undeclared = p.scope.Parent.Undeclared[:len(p.scope.Parent.Undeclared)-1]
		v.Decl = ArgumentDecl
		p.scope.Declared = append(p.scope.Declared, v)
	}

	p.await = false
	arrowFunc.Params.List = []BindingElement{{v, nil}}
	arrowFunc.Body.List = p.parseArrowFuncBody()

	p.await, p.yield = parentAwait, parentYield
	p.exitScope(parent)
	return
}
+
+func (p *Parser) parseArrowFuncBody() (list []IStmt) {
+ // expect we're at arrow
+ if p.tt != ArrowToken {
+ p.fail("arrow function", ArrowToken)
+ return
+ } else if p.prevLT {
+ p.fail("expression")
+ return
+ }
+ p.next()
+
+ // mark undeclared vars as arguments in `function f(a=b){var b}` where the b's are different vars
+ p.scope.MarkFuncArgs()
+
+ if p.tt == OpenBraceToken {
+ parentInFor := p.inFor
+ p.inFor = false
+ p.yield = false
+ p.allowDirectivePrologue = true
+ list = p.parseStmtList("arrow function")
+ p.inFor = parentInFor
+ } else {
+ list = []IStmt{&ReturnStmt{p.parseExpression(OpAssign)}}
+ }
+ return
+}
+
+func (p *Parser) parseIdentifierExpression(prec OpPrec, ident []byte) IExpr {
+ var left IExpr
+ left = p.scope.Use(ident)
+ return p.parseExpressionSuffix(left, prec, OpPrimary)
+}
+
// parseAsyncExpression parses an expression that started with the async
// keyword (whose bytes are passed in async): an async function expression,
// an async arrow function, or a plain identifier reference named async.
func (p *Parser) parseAsyncExpression(prec OpPrec, async []byte) IExpr {
	// assume we're at a token after async
	var left IExpr
	precLeft := OpPrimary
	if !p.prevLT && p.tt == FunctionToken {
		// primary expression
		left = p.parseAsyncFuncExpr()
	} else if !p.prevLT && prec <= OpAssign && (p.tt == OpenParenToken || IsIdentifier(p.tt) || !p.yield && p.tt == YieldToken || p.tt == AwaitToken) {
		// async arrow function expression
		if p.tt == AwaitToken {
			// await is not allowed as an async arrow function parameter
			p.fail("arrow function")
			return nil
		} else if p.tt == OpenParenToken {
			// could still be a call expression async(...); disambiguated there
			return p.parseParenthesizedExpressionOrArrowFunc(prec, async)
		}
		left = p.parseAsyncArrowFunc()
		precLeft = OpAssign
	} else {
		// async used as a regular identifier
		left = p.scope.Use(async)
	}
	return p.parseExpressionSuffix(left, prec, precLeft)
}
+
// parseExpression parses an expression that has a precedence of prec or higher.
// It parses the leading (primary or unary) part of the expression and then
// hands off to parseExpressionSuffix for trailing operators. Returns nil on
// parse error. Nesting is limited to guard against stack exhaustion.
func (p *Parser) parseExpression(prec OpPrec) IExpr {
	p.exprLevel++
	if 1000 < p.exprLevel {
		p.failMessage("too many nested expressions")
		return nil
	}

	// reparse input if we have / or /= as the beginning of a new expression, this should be a regular expression!
	if p.tt == DivToken || p.tt == DivEqToken {
		p.tt, p.data = p.l.RegExp()
		if p.tt == ErrorToken {
			p.fail("regular expression")
			return nil
		}
	}

	var left IExpr
	precLeft := OpPrimary

	// fast paths for the two most common cases: identifiers and numbers
	if IsIdentifier(p.tt) && p.tt != AsyncToken {
		left = p.scope.Use(p.data)
		p.next()
		suffix := p.parseExpressionSuffix(left, prec, precLeft)
		p.exprLevel--
		return suffix
	} else if IsNumeric(p.tt) {
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		suffix := p.parseExpressionSuffix(left, prec, precLeft)
		p.exprLevel--
		return suffix
	}

	switch tt := p.tt; tt {
	case StringToken, ThisToken, NullToken, TrueToken, FalseToken, RegExpToken:
		left = &LiteralExpr{p.tt, p.data}
		p.next()
	case OpenBracketToken:
		parentInFor := p.inFor
		p.inFor = false
		array := p.parseArrayLiteral()
		left = &array
		p.inFor = parentInFor
	case OpenBraceToken:
		parentInFor := p.inFor
		p.inFor = false
		object := p.parseObjectLiteral()
		left = &object
		p.inFor = parentInFor
	case OpenParenToken:
		// parenthesized expression or arrow parameter list
		if OpAssign < prec {
			// must be a parenthesized expression
			p.next()
			parentInFor := p.inFor
			p.inFor = false
			left = &GroupExpr{p.parseExpression(OpExpr)}
			p.inFor = parentInFor
			if !p.consume("expression", CloseParenToken) {
				return nil
			}
			break
		}
		suffix := p.parseParenthesizedExpressionOrArrowFunc(prec, nil)
		p.exprLevel--
		return suffix
	case NotToken, BitNotToken, TypeofToken, VoidToken, DeleteToken:
		if OpUnary < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{tt, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case AddToken:
		if OpUnary < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{PosToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case SubToken:
		if OpUnary < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{NegToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case IncrToken:
		if OpUpdate < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{PreIncrToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case DecrToken:
		if OpUpdate < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{PreDecrToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case AwaitToken:
		// either accepted as IdentifierReference or as AwaitExpression
		if p.await && prec <= OpUnary {
			p.next()
			left = &UnaryExpr{tt, p.parseExpression(OpUnary)}
			precLeft = OpUnary
		} else if p.await {
			p.fail("expression")
			return nil
		} else {
			left = p.scope.Use(p.data)
			p.next()
		}
	case NewToken:
		p.next()
		if p.tt == DotToken {
			p.next()
			if !p.consume("new.target expression", TargetToken) {
				return nil
			}
			left = &NewTargetExpr{}
			precLeft = OpMember
		} else {
			newExpr := &NewExpr{p.parseExpression(OpNew), nil}
			if p.tt == OpenParenToken {
				args := p.parseArguments()
				if len(args.List) != 0 {
					newExpr.Args = &args
				}
				precLeft = OpMember
			} else {
				precLeft = OpNew
			}
			left = newExpr
		}
	case ImportToken:
		// OpMember < prec does never happen
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		if p.tt == DotToken {
			p.next()
			if !p.consume("import.meta expression", MetaToken) {
				return nil
			}
			left = &ImportMetaExpr{}
			precLeft = OpMember
		} else if p.tt != OpenParenToken {
			p.fail("import expression", OpenParenToken)
			return nil
		} else if OpCall < prec {
			p.fail("expression")
			return nil
		} else {
			precLeft = OpCall
		}
	case SuperToken:
		// OpMember < prec does never happen
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		if OpCall < prec && p.tt != DotToken && p.tt != OpenBracketToken {
			p.fail("super expression", OpenBracketToken, DotToken)
			return nil
		} else if p.tt != DotToken && p.tt != OpenBracketToken && p.tt != OpenParenToken {
			p.fail("super expression", OpenBracketToken, OpenParenToken, DotToken)
			return nil
		}
		if OpCall < prec {
			precLeft = OpMember
		} else {
			precLeft = OpCall
		}
	case YieldToken:
		// either accepted as IdentifierReference or as YieldExpression
		if p.yield && prec <= OpAssign {
			// YieldExpression
			p.next()
			yieldExpr := YieldExpr{}
			if !p.prevLT {
				yieldExpr.Generator = p.tt == MulToken
				if yieldExpr.Generator {
					p.next()
					yieldExpr.X = p.parseExpression(OpAssign)
				} else if p.tt != CloseBraceToken && p.tt != CloseBracketToken && p.tt != CloseParenToken && p.tt != ColonToken && p.tt != CommaToken && p.tt != SemicolonToken {
					yieldExpr.X = p.parseExpression(OpAssign)
				}
			}
			left = &yieldExpr
			precLeft = OpAssign
		} else if p.yield {
			p.fail("expression")
			return nil
		} else {
			left = p.scope.Use(p.data)
			p.next()
		}
	case AsyncToken:
		async := p.data
		p.next()
		left = p.parseAsyncExpression(prec, async)
	case ClassToken:
		parentInFor := p.inFor
		p.inFor = false
		left = p.parseClassExpr()
		p.inFor = parentInFor
	case FunctionToken:
		parentInFor := p.inFor
		p.inFor = false
		left = p.parseFuncExpr()
		p.inFor = parentInFor
	case TemplateToken, TemplateStartToken:
		parentInFor := p.inFor
		p.inFor = false
		template := p.parseTemplateLiteral(precLeft)
		left = &template
		p.inFor = parentInFor
	default:
		p.fail("expression")
		return nil
	}
	suffix := p.parseExpressionSuffix(left, prec, precLeft)
	p.exprLevel--
	return suffix
}
+
// parseExpressionSuffix parses the trailing operators of an expression whose
// leading part is left. prec is the minimum precedence to accept; precLeft
// is the precedence of left so far and is used to reject grammatically
// invalid combinations (e.g. an update operator after a yield expression).
// Returns nil on parse error.
func (p *Parser) parseExpressionSuffix(left IExpr, prec, precLeft OpPrec) IExpr {
	for i := 0; ; i++ {
		if 1000 < p.exprLevel+i {
			p.failMessage("too many nested expressions")
			return nil
		}

		switch tt := p.tt; tt {
		case EqToken, MulEqToken, DivEqToken, ModEqToken, ExpEqToken, AddEqToken, SubEqToken, LtLtEqToken, GtGtEqToken, GtGtGtEqToken, BitAndEqToken, BitXorEqToken, BitOrEqToken, AndEqToken, OrEqToken, NullishEqToken:
			if OpAssign < prec {
				return left
			} else if precLeft < OpLHS {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpAssign)}
			precLeft = OpAssign
		case LtToken, LtEqToken, GtToken, GtEqToken, InToken, InstanceofToken:
			// `in` is not an operator inside a for-statement header
			if OpCompare < prec || p.inFor && tt == InToken {
				return left
			} else if precLeft < OpCompare {
				// can only fail after a yield or arrow function expression
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpShift)}
			precLeft = OpCompare
		case EqEqToken, NotEqToken, EqEqEqToken, NotEqEqToken:
			if OpEquals < prec {
				return left
			} else if precLeft < OpEquals {
				// can only fail after a yield or arrow function expression
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpCompare)}
			precLeft = OpEquals
		case AndToken:
			if OpAnd < prec {
				return left
			} else if precLeft < OpAnd {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitOr)}
			precLeft = OpAnd
		case OrToken:
			if OpOr < prec {
				return left
			} else if precLeft < OpOr {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpAnd)}
			precLeft = OpOr
		case NullishToken:
			// ?? may not be mixed with && or || without parentheses
			if OpCoalesce < prec {
				return left
			} else if precLeft < OpBitOr && precLeft != OpCoalesce {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitOr)}
			precLeft = OpCoalesce
		case DotToken:
			// OpMember < prec does never happen
			if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			p.next()
			if !IsIdentifierName(p.tt) && p.tt != PrivateIdentifierToken {
				p.fail("dot expression", IdentifierToken)
				return nil
			}
			exprPrec := OpMember
			if precLeft < OpMember {
				exprPrec = OpCall
			}
			if p.tt != PrivateIdentifierToken {
				// reserved words are valid property names after a dot
				p.tt = IdentifierToken
			}
			left = &DotExpr{left, LiteralExpr{p.tt, p.data}, exprPrec, false}
			p.next()
			if precLeft < OpMember {
				precLeft = OpCall
			} else {
				precLeft = OpMember
			}
		case OpenBracketToken:
			// OpMember < prec does never happen
			if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			p.next()
			exprPrec := OpMember
			if precLeft < OpMember {
				exprPrec = OpCall
			}
			parentInFor := p.inFor
			p.inFor = false
			left = &IndexExpr{left, p.parseExpression(OpExpr), exprPrec, false}
			p.inFor = parentInFor
			if !p.consume("index expression", CloseBracketToken) {
				return nil
			}
			if precLeft < OpMember {
				precLeft = OpCall
			} else {
				precLeft = OpMember
			}
		case OpenParenToken:
			if OpCall < prec {
				return left
			} else if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			parentInFor := p.inFor
			p.inFor = false
			left = &CallExpr{left, p.parseArguments(), false}
			precLeft = OpCall
			p.inFor = parentInFor
		case TemplateToken, TemplateStartToken:
			// OpMember < prec does never happen
			if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			parentInFor := p.inFor
			p.inFor = false
			template := p.parseTemplateLiteral(precLeft)
			template.Tag = left
			left = &template
			if precLeft < OpMember {
				precLeft = OpCall
			} else {
				precLeft = OpMember
			}
			p.inFor = parentInFor
		case OptChainToken:
			if OpCall < prec {
				return left
			}
			p.next()
			if p.tt == OpenParenToken {
				left = &CallExpr{left, p.parseArguments(), true}
			} else if p.tt == OpenBracketToken {
				p.next()
				left = &IndexExpr{left, p.parseExpression(OpExpr), OpCall, true}
				if !p.consume("optional chaining expression", CloseBracketToken) {
					return nil
				}
			} else if p.tt == TemplateToken || p.tt == TemplateStartToken {
				template := p.parseTemplateLiteral(precLeft)
				template.Prec = OpCall
				template.Tag = left
				template.Optional = true
				left = &template
			} else if IsIdentifierName(p.tt) {
				left = &DotExpr{left, LiteralExpr{IdentifierToken, p.data}, OpCall, true}
				p.next()
			} else if p.tt == PrivateIdentifierToken {
				left = &DotExpr{left, LiteralExpr{p.tt, p.data}, OpCall, true}
				p.next()
			} else {
				p.fail("optional chaining expression", IdentifierToken, OpenParenToken, OpenBracketToken, TemplateToken)
				return nil
			}
			precLeft = OpCall
		case IncrToken:
			// a line terminator before ++ ends the expression (ASI)
			if p.prevLT || OpUpdate < prec {
				return left
			} else if precLeft < OpLHS {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &UnaryExpr{PostIncrToken, left}
			precLeft = OpUpdate
		case DecrToken:
			// a line terminator before -- ends the expression (ASI)
			if p.prevLT || OpUpdate < prec {
				return left
			} else if precLeft < OpLHS {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &UnaryExpr{PostDecrToken, left}
			precLeft = OpUpdate
		case ExpToken:
			// ** is right-associative, hence recursing at OpExp
			if OpExp < prec {
				return left
			} else if precLeft < OpUpdate {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpExp)}
			precLeft = OpExp
		case MulToken, DivToken, ModToken:
			if OpMul < prec {
				return left
			} else if precLeft < OpMul {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpExp)}
			precLeft = OpMul
		case AddToken, SubToken:
			if OpAdd < prec {
				return left
			} else if precLeft < OpAdd {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpMul)}
			precLeft = OpAdd
		case LtLtToken, GtGtToken, GtGtGtToken:
			if OpShift < prec {
				return left
			} else if precLeft < OpShift {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpAdd)}
			precLeft = OpShift
		case BitAndToken:
			if OpBitAnd < prec {
				return left
			} else if precLeft < OpBitAnd {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpEquals)}
			precLeft = OpBitAnd
		case BitXorToken:
			if OpBitXor < prec {
				return left
			} else if precLeft < OpBitXor {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitAnd)}
			precLeft = OpBitXor
		case BitOrToken:
			if OpBitOr < prec {
				return left
			} else if precLeft < OpBitOr {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitXor)}
			precLeft = OpBitOr
		case QuestionToken:
			if OpAssign < prec {
				return left
			} else if precLeft < OpCoalesce {
				p.fail("expression")
				return nil
			}
			p.next()
			ifExpr := p.parseExpression(OpAssign)
			if !p.consume("conditional expression", ColonToken) {
				return nil
			}
			elseExpr := p.parseExpression(OpAssign)
			left = &CondExpr{left, ifExpr, elseExpr}
			precLeft = OpAssign
		case CommaToken:
			if OpExpr < prec {
				return left
			}
			p.next()
			if commaExpr, ok := left.(*CommaExpr); ok {
				commaExpr.List = append(commaExpr.List, p.parseExpression(OpAssign))
				i-- // adjust expression nesting limit
			} else {
				left = &CommaExpr{[]IExpr{left, p.parseExpression(OpAssign)}}
			}
			precLeft = OpExpr
		case ArrowToken:
			// handle identifier => ..., where identifier could also be yield or await
			if OpAssign < prec {
				return left
			} else if precLeft < OpPrimary {
				p.fail("expression")
				return nil
			}

			v, ok := left.(*Var)
			if !ok {
				p.fail("expression")
				return nil
			}

			left = p.parseIdentifierArrowFunc(v)
			precLeft = OpAssign
		default:
			return left
		}
	}
}
+
// parseAssignmentExpression parses an assignment expression while tracking
// whether the enclosing parenthesized expression could still be an arrow
// function parameter list (p.assumeArrowFunc).
func (p *Parser) parseAssignmentExpression() IExpr {
	// this could be a BindingElement or an AssignmentExpression. Here we handle BindingIdentifier with a possible Initializer, BindingPattern will be handled by parseArrayLiteral or parseObjectLiteral
	if p.assumeArrowFunc && p.isIdentifierReference(p.tt) {
		tt := p.tt
		data := p.data
		p.next()
		if p.tt == EqToken || p.tt == CommaToken || p.tt == CloseParenToken || p.tt == CloseBraceToken || p.tt == CloseBracketToken {
			// the identifier stands alone (possibly with an initializer),
			// so it may be an arrow function argument: declare it as such
			var ok bool
			var left IExpr
			left, ok = p.scope.Declare(ArgumentDecl, data)
			if ok {
				p.assumeArrowFunc = false
				left = p.parseExpressionSuffix(left, OpAssign, OpPrimary)
				p.assumeArrowFunc = true
				return left
			}
		}
		// not a plain binding identifier; reparse as a full expression
		p.assumeArrowFunc = false
		if tt == AsyncToken {
			return p.parseAsyncExpression(OpAssign, data)
		}
		return p.parseIdentifierExpression(OpAssign, data)
	} else if p.tt != OpenBracketToken && p.tt != OpenBraceToken {
		p.assumeArrowFunc = false
	}
	return p.parseExpression(OpAssign)
}
+
// parseParenthesizedExpressionOrArrowFunc parses a construct starting at (
// that is either a parenthesized expression, an (async) arrow function
// parameter list, or — when async is non-nil — a call of the identifier
// async. The ambiguity is resolved only after the closing parenthesis, by
// checking for =>.
func (p *Parser) parseParenthesizedExpressionOrArrowFunc(prec OpPrec, async []byte) IExpr {
	var left IExpr
	precLeft := OpPrimary

	// expect to be at (
	p.next()

	isAsync := async != nil
	arrowFunc := &ArrowFunc{}
	parent := p.enterScope(&arrowFunc.Body.Scope, true)
	parentAssumeArrowFunc, parentInFor := p.assumeArrowFunc, p.inFor
	p.assumeArrowFunc, p.inFor = true, false

	// parse a parenthesized expression but assume we might be parsing an (async) arrow function. If this is really an arrow function, parsing as a parenthesized expression cannot fail as AssignmentExpression, ArrayLiteral, and ObjectLiteral are supersets of SingleNameBinding, ArrayBindingPattern, and ObjectBindingPattern respectively. Any identifier that would be a BindingIdentifier in case of an arrow function, will be added as such. If finally this is not an arrow function, we will demote those variables as undeclared and merge them with the parent scope.

	var list []IExpr
	var rest IExpr
	for p.tt != CloseParenToken && p.tt != ErrorToken {
		if p.tt == EllipsisToken && p.assumeArrowFunc {
			// rest parameter (or spread argument of an async call)
			p.next()
			if isAsync {
				rest = p.parseAssignmentExpression()
				if p.tt == CommaToken {
					p.next()
				}
			} else if p.isIdentifierReference(p.tt) {
				var ok bool
				rest, ok = p.scope.Declare(ArgumentDecl, p.data)
				if !ok {
					p.failMessage("identifier %s has already been declared", string(p.data))
					return nil
				}
				p.next()
			} else if p.tt == OpenBracketToken {
				array := p.parseArrayLiteral()
				rest = &array
			} else if p.tt == OpenBraceToken {
				object := p.parseObjectLiteral()
				rest = &object
			} else {
				p.fail("arrow function")
				return nil
			}
			break
		}

		list = append(list, p.parseAssignmentExpression())
		if p.tt != CommaToken {
			break
		}
		p.next()
	}
	if p.tt != CloseParenToken {
		p.fail("expression")
		return nil
	}
	p.next()
	isArrowFunc := p.tt == ArrowToken && p.assumeArrowFunc
	p.assumeArrowFunc, p.inFor = parentAssumeArrowFunc, parentInFor

	if isArrowFunc {
		parentAwait, parentYield := p.await, p.yield
		p.await = isAsync

		// arrow function
		arrowFunc.Params = Params{List: make([]BindingElement, len(list))}
		for i, item := range list {
			arrowFunc.Params.List[i] = p.exprToBindingElement(item) // can not fail when assumeArrowFunc is set
		}
		arrowFunc.Async = isAsync
		arrowFunc.Params.Rest = p.exprToBinding(rest)
		arrowFunc.Body.List = p.parseArrowFuncBody()

		p.await, p.yield = parentAwait, parentYield
		p.exitScope(parent)

		left = arrowFunc
		precLeft = OpAssign
	} else if len(list) == 0 || !isAsync && rest != nil || isAsync && OpCall < prec {
		p.fail("arrow function", ArrowToken)
		return nil
	} else {
		p.exitScope(parent)

		// for any nested FuncExpr/ArrowFunc scope, Parent will point to the temporary scope created in case this was an arrow function instead of a parenthesized expression. This is not a problem as Parent is only used for defining new variables, and we already parsed all the nested scopes so that Parent (not Func) are not relevant anymore. Anyways, the Parent will just point to an empty scope, whose Parent/Func will point to valid scopes. This should not be a big deal.
		// Here we move all declared ArgumentDecls (in case of an arrow function) to its parent scope as undeclared variables (identifiers used in a parenthesized expression).
		arrowFunc.Body.Scope.UndeclareScope()

		if isAsync {
			// call expression
			args := Args{}
			for _, item := range list {
				args.List = append(args.List, Arg{Value: item, Rest: false})
			}
			if rest != nil {
				args.List = append(args.List, Arg{Value: rest, Rest: true})
			}
			left = p.scope.Use(async)
			left = &CallExpr{left, args, false}
			precLeft = OpCall
		} else {
			// parenthesized expression
			if 1 < len(list) {
				left = &GroupExpr{&CommaExpr{list}}
			} else {
				left = &GroupExpr{list[0]}
			}
		}
	}
	return p.parseExpressionSuffix(left, prec, precLeft)
}
+
+// exprToBinding converts a CoverParenthesizedExpressionAndArrowParameterList into FormalParameters
+// Any unbound variables of the parameters (Initializer, ComputedPropertyName) are kept in the parent scope
+func (p *Parser) exprToBinding(expr IExpr) (binding IBinding) {
+ if v, ok := expr.(*Var); ok {
+ binding = v
+ } else if array, ok := expr.(*ArrayExpr); ok {
+ bindingArray := BindingArray{}
+ for _, item := range array.List {
+ if item.Spread {
+ // can only BindingIdentifier or BindingPattern
+ bindingArray.Rest = p.exprToBinding(item.Value)
+ break
+ }
+ var bindingElement BindingElement
+ bindingElement = p.exprToBindingElement(item.Value)
+ bindingArray.List = append(bindingArray.List, bindingElement)
+ }
+ binding = &bindingArray
+ } else if object, ok := expr.(*ObjectExpr); ok {
+ bindingObject := BindingObject{}
+ for _, item := range object.List {
+ if item.Spread {
+ // can only be BindingIdentifier
+ bindingObject.Rest = item.Value.(*Var)
+ break
+ }
+ var bindingElement BindingElement
+ bindingElement.Binding = p.exprToBinding(item.Value)
+ if bindingElement.Binding == nil {
+ bindingElement = p.exprToBindingElement(item.Value)
+ } else if item.Init != nil {
+ bindingElement.Default = item.Init
+ }
+ bindingObject.List = append(bindingObject.List, BindingObjectItem{Key: item.Name, Value: bindingElement})
+ }
+ binding = &bindingObject
+ }
+ return
+}
+
+func (p *Parser) exprToBindingElement(expr IExpr) (bindingElement BindingElement) {
+ if assign, ok := expr.(*BinaryExpr); ok && assign.Op == EqToken {
+ bindingElement.Binding = p.exprToBinding(assign.X)
+ bindingElement.Default = assign.Y
+ } else {
+ bindingElement.Binding = p.exprToBinding(expr)
+ }
+ return
+}
+
+func (p *Parser) isIdentifierReference(tt TokenType) bool {
+ return IsIdentifier(tt) || tt == YieldToken && !p.yield || tt == AwaitToken && !p.await
+}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/table.go b/vendor/github.com/tdewolff/parse/v2/js/table.go
new file mode 100644
index 0000000..2533091
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/table.go
@@ -0,0 +1,142 @@
+package js
+
+import "strconv"
+
// OpPrec is the operator precedence
type OpPrec int

// OpPrec values.
const (
	OpExpr     OpPrec = iota // a,b
	OpAssign                 // a?b:c, yield x, ()=>x, async ()=>x, a=b, a+=b, ...
	OpCoalesce               // a??b
	OpOr                     // a||b
	OpAnd                    // a&&b
	OpBitOr                  // a|b
	OpBitXor                 // a^b
	OpBitAnd                 // a&b
	OpEquals                 // a==b, a!=b, a===b, a!==b
	OpCompare                // a<b, a>b, a<=b, a>=b, a instanceof b, a in b
	OpShift                  // a<<b, a>>b, a>>>b
	OpAdd                    // a+b, a-b
	OpMul                    // a*b, a/b, a%b
	OpExp                    // a**b
	OpUnary                  // ++x, --x, delete x, void x, typeof x, +x, -x, ~x, !x, await x
	OpUpdate                 // x++, x--
	OpLHS                    // CallExpr/OptChainExpr or NewExpr
	OpCall                   // a?.b, a(b), super(a), import(a)
	OpNew                    // new a
	OpMember                 // a[b], a.b, a`b`, super[x], super.x, new.target, import.meta, new a(b)
	OpPrimary                // literal, function, class, parenthesized
)

// opPrecNames holds the name of every OpPrec value; the slice index must stay
// in sync with the constant declaration order above.
var opPrecNames = [...]string{
	"OpExpr", "OpAssign", "OpCoalesce", "OpOr", "OpAnd", "OpBitOr", "OpBitXor",
	"OpBitAnd", "OpEquals", "OpCompare", "OpShift", "OpAdd", "OpMul", "OpExp",
	"OpUnary", "OpUpdate", "OpLHS", "OpCall", "OpNew", "OpMember", "OpPrimary",
}

// String returns the name of the precedence level, or "Invalid(n)" for values
// outside the declared range.
// Fixed: the OpAdd case previously returned the misspelled name "OAdd".
func (prec OpPrec) String() string {
	if 0 <= int(prec) && int(prec) < len(opPrecNames) {
		return opPrecNames[prec]
	}
	return "Invalid(" + strconv.Itoa(int(prec)) + ")"
}
+
// Keywords is a map of reserved, strict, and other keywords, mapping the
// textual form of each keyword to its TokenType. The entries must stay
// consistent with the token constants declared in tokentype.go.
var Keywords = map[string]TokenType{
	// reserved
	"await":      AwaitToken,
	"break":      BreakToken,
	"case":       CaseToken,
	"catch":      CatchToken,
	"class":      ClassToken,
	"const":      ConstToken,
	"continue":   ContinueToken,
	"debugger":   DebuggerToken,
	"default":    DefaultToken,
	"delete":     DeleteToken,
	"do":         DoToken,
	"else":       ElseToken,
	"enum":       EnumToken,
	"export":     ExportToken,
	"extends":    ExtendsToken,
	"false":      FalseToken,
	"finally":    FinallyToken,
	"for":        ForToken,
	"function":   FunctionToken,
	"if":         IfToken,
	"import":     ImportToken,
	"in":         InToken,
	"instanceof": InstanceofToken,
	"new":        NewToken,
	"null":       NullToken,
	"return":     ReturnToken,
	"super":      SuperToken,
	"switch":     SwitchToken,
	"this":       ThisToken,
	"throw":      ThrowToken,
	"true":       TrueToken,
	"try":        TryToken,
	"typeof":     TypeofToken,
	"var":        VarToken,
	"void":       VoidToken,
	"while":      WhileToken,
	"with":       WithToken,
	"yield":      YieldToken,

	// strict mode
	"let":        LetToken,
	"static":     StaticToken,
	"implements": ImplementsToken,
	"interface":  InterfaceToken,
	"package":    PackageToken,
	"private":    PrivateToken,
	"protected":  ProtectedToken,
	"public":     PublicToken,

	// extra (contextual keywords, lexed as identifiers)
	"as":     AsToken,
	"async":  AsyncToken,
	"from":   FromToken,
	"get":    GetToken,
	"meta":   MetaToken,
	"of":     OfToken,
	"set":    SetToken,
	"target": TargetToken,
}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/tokentype.go b/vendor/github.com/tdewolff/parse/v2/js/tokentype.go
new file mode 100644
index 0000000..4f310be
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/tokentype.go
@@ -0,0 +1,404 @@
+package js
+
+import "strconv"
+
// TokenType determines the type of token, eg. a number or a semicolon.
type TokenType uint16 // from LSB to MSB: 8 bits for tokens per category, 1 bit for numeric, 1 bit for punctuator, 1 bit for operator, 1 bit for identifier, 4 bits unused

// TokenType values.
// Base tokens carry no category bit; they are matched by value only.
const (
	ErrorToken TokenType = iota // extra token when errors occur
	WhitespaceToken
	LineTerminatorToken // \r \n \r\n
	CommentToken
	CommentLineTerminatorToken
	StringToken
	TemplateToken
	TemplateStartToken
	TemplateMiddleToken
	TemplateEndToken
	RegExpToken
	PrivateIdentifierToken
)

// Numeric token values (category bit 0x0100; see IsNumeric).
const (
	NumericToken TokenType = 0x0100 + iota
	DecimalToken
	BinaryToken
	OctalToken
	HexadecimalToken
	BigIntToken
)

// Punctuator token values (category bit 0x0200; see IsPunctuator).
const (
	PunctuatorToken   TokenType = 0x0200 + iota
	OpenBraceToken              // {
	CloseBraceToken             // }
	OpenParenToken              // (
	CloseParenToken             // )
	OpenBracketToken            // [
	CloseBracketToken           // ]
	DotToken                    // .
	SemicolonToken              // ;
	CommaToken                  // ,
	QuestionToken               // ?
	ColonToken                  // :
	ArrowToken                  // =>
	EllipsisToken               // ...
)

// Operator token values.
// 0x0600 sets both the operator (0x0400) and punctuator (0x0200) category
// bits, so every operator also satisfies IsPunctuator.
const (
	OperatorToken  TokenType = 0x0600 + iota
	EqToken                  // =
	EqEqToken                // ==
	EqEqEqToken              // ===
	NotToken                 // !
	NotEqToken               // !=
	NotEqEqToken             // !==
	LtToken                  // <
	LtEqToken                // <=
	LtLtToken                // <<
	LtLtEqToken              // <<=
	GtToken                  // >
	GtEqToken                // >=
	GtGtToken                // >>
	GtGtEqToken              // >>=
	GtGtGtToken              // >>>
	GtGtGtEqToken            // >>>=
	AddToken                 // +
	AddEqToken               // +=
	IncrToken                // ++
	SubToken                 // -
	SubEqToken               // -=
	DecrToken                // --
	MulToken                 // *
	MulEqToken               // *=
	ExpToken                 // **
	ExpEqToken               // **=
	DivToken                 // /
	DivEqToken               // /=
	ModToken                 // %
	ModEqToken               // %=
	BitAndToken              // &
	BitOrToken               // |
	BitXorToken              // ^
	BitNotToken              // ~
	BitAndEqToken            // &=
	BitOrEqToken             // |=
	BitXorEqToken            // ^=
	AndToken                 // &&
	OrToken                  // ||
	NullishToken             // ??
	AndEqToken               // &&=
	OrEqToken                // ||=
	NullishEqToken           // ??=
	OptChainToken            // ?.

	// unused in lexer; the parser uses these to distinguish unary/update
	// forms that share a source representation with binary operators
	PosToken      // +a
	NegToken      // -a
	PreIncrToken  // ++a
	PreDecrToken  // --a
	PostIncrToken // a++
	PostDecrToken // a--
)

// Reserved token values (category bit 0x0800; see IsReservedWord).
const (
	ReservedToken TokenType = 0x0800 + iota
	AwaitToken
	BreakToken
	CaseToken
	CatchToken
	ClassToken
	ConstToken
	ContinueToken
	DebuggerToken
	DefaultToken
	DeleteToken
	DoToken
	ElseToken
	EnumToken
	ExportToken
	ExtendsToken
	FalseToken
	FinallyToken
	ForToken
	FunctionToken
	IfToken
	ImportToken
	InToken
	InstanceofToken
	NewToken
	NullToken
	ReturnToken
	SuperToken
	SwitchToken
	ThisToken
	ThrowToken
	TrueToken
	TryToken
	TypeofToken
	YieldToken
	VarToken
	VoidToken
	WhileToken
	WithToken
)

// Identifier token values (category bit 0x1000; see IsIdentifier).
// These are contextual keywords that remain usable as identifiers.
const (
	IdentifierToken TokenType = 0x1000 + iota
	AsToken
	AsyncToken
	FromToken
	GetToken
	ImplementsToken
	InterfaceToken
	LetToken
	MetaToken
	OfToken
	PackageToken
	PrivateToken
	ProtectedToken
	PublicToken
	SetToken
	StaticToken
	TargetToken
)
+
// The predicates below test the category bits of a TokenType (see the
// TokenType declaration): 0x0100 numeric, 0x0200 punctuator, 0x0400 operator,
// 0x0800 reserved word, 0x1000 identifier. Operator tokens start at 0x0600,
// so they satisfy both IsOperator and IsPunctuator.

// IsNumeric returns true if token is numeric.
func IsNumeric(tt TokenType) bool {
	return tt&0x0100 != 0
}

// IsPunctuator returns true if token is a punctuator.
func IsPunctuator(tt TokenType) bool {
	return tt&0x0200 != 0
}

// IsOperator returns true if token is an operator.
func IsOperator(tt TokenType) bool {
	return tt&0x0400 != 0
}

// IsIdentifierName matches IdentifierName, i.e. any identifier or reserved word.
func IsIdentifierName(tt TokenType) bool {
	return tt&0x1800 != 0
}

// IsReservedWord matches ReservedWord.
func IsReservedWord(tt TokenType) bool {
	return tt&0x0800 != 0
}

// IsIdentifier matches Identifier, i.e. IdentifierName but not ReservedWord. Does not match yield or await.
func IsIdentifier(tt TokenType) bool {
	return tt&0x1000 != 0
}
+
+func (tt TokenType) String() string {
+ s := tt.Bytes()
+ if s == nil {
+ return "Invalid(" + strconv.Itoa(int(tt)) + ")"
+ }
+ return string(s)
+}
+
// operatorBytes lists the textual form of every operator token; the index must
// stay in sync with the operator constant order (OperatorToken + iota).
// The six trailing entries repeat "+", "-", "++" and "--" for the parser-only
// PosToken, NegToken, PreIncrToken, PreDecrToken, PostIncrToken and
// PostDecrToken, which share a source representation with binary operators.
var operatorBytes = [][]byte{
	[]byte("Operator"),
	[]byte("="),
	[]byte("=="),
	[]byte("==="),
	[]byte("!"),
	[]byte("!="),
	[]byte("!=="),
	[]byte("<"),
	[]byte("<="),
	[]byte("<<"),
	[]byte("<<="),
	[]byte(">"),
	[]byte(">="),
	[]byte(">>"),
	[]byte(">>="),
	[]byte(">>>"),
	[]byte(">>>="),
	[]byte("+"),
	[]byte("+="),
	[]byte("++"),
	[]byte("-"),
	[]byte("-="),
	[]byte("--"),
	[]byte("*"),
	[]byte("*="),
	[]byte("**"),
	[]byte("**="),
	[]byte("/"),
	[]byte("/="),
	[]byte("%"),
	[]byte("%="),
	[]byte("&"),
	[]byte("|"),
	[]byte("^"),
	[]byte("~"),
	[]byte("&="),
	[]byte("|="),
	[]byte("^="),
	[]byte("&&"),
	[]byte("||"),
	[]byte("??"),
	[]byte("&&="),
	[]byte("||="),
	[]byte("??="),
	[]byte("?."),
	[]byte("+"),
	[]byte("-"),
	[]byte("++"),
	[]byte("--"),
	[]byte("++"),
	[]byte("--"),
}

// reservedWordBytes lists the textual form of every reserved word; the index
// must stay in sync with the reserved constant order (ReservedToken + iota).
var reservedWordBytes = [][]byte{
	[]byte("Reserved"),
	[]byte("await"),
	[]byte("break"),
	[]byte("case"),
	[]byte("catch"),
	[]byte("class"),
	[]byte("const"),
	[]byte("continue"),
	[]byte("debugger"),
	[]byte("default"),
	[]byte("delete"),
	[]byte("do"),
	[]byte("else"),
	[]byte("enum"),
	[]byte("export"),
	[]byte("extends"),
	[]byte("false"),
	[]byte("finally"),
	[]byte("for"),
	[]byte("function"),
	[]byte("if"),
	[]byte("import"),
	[]byte("in"),
	[]byte("instanceof"),
	[]byte("new"),
	[]byte("null"),
	[]byte("return"),
	[]byte("super"),
	[]byte("switch"),
	[]byte("this"),
	[]byte("throw"),
	[]byte("true"),
	[]byte("try"),
	[]byte("typeof"),
	[]byte("yield"),
	[]byte("var"),
	[]byte("void"),
	[]byte("while"),
	[]byte("with"),
}

// identifierBytes lists the textual form of every contextual keyword; the
// index must stay in sync with the identifier constant order (IdentifierToken + iota).
var identifierBytes = [][]byte{
	[]byte("Identifier"),
	[]byte("as"),
	[]byte("async"),
	[]byte("from"),
	[]byte("get"),
	[]byte("implements"),
	[]byte("interface"),
	[]byte("let"),
	[]byte("meta"),
	[]byte("of"),
	[]byte("package"),
	[]byte("private"),
	[]byte("protected"),
	[]byte("public"),
	[]byte("set"),
	[]byte("static"),
	[]byte("target"),
}
+
// Bytes returns the string representation of a TokenType.
// Operator, reserved-word, and identifier tokens are looked up in the tables
// above by their offset within the category; all remaining tokens are matched
// individually. It returns nil for values with no textual form.
func (tt TokenType) Bytes() []byte {
	if IsOperator(tt) && int(tt-OperatorToken) < len(operatorBytes) {
		return operatorBytes[tt-OperatorToken]
	} else if IsReservedWord(tt) && int(tt-ReservedToken) < len(reservedWordBytes) {
		return reservedWordBytes[tt-ReservedToken]
	} else if IsIdentifier(tt) && int(tt-IdentifierToken) < len(identifierBytes) {
		return identifierBytes[tt-IdentifierToken]
	}

	// base, numeric, and punctuator tokens
	switch tt {
	case ErrorToken:
		return []byte("Error")
	case WhitespaceToken:
		return []byte("Whitespace")
	case LineTerminatorToken:
		return []byte("LineTerminator")
	case CommentToken:
		return []byte("Comment")
	case CommentLineTerminatorToken:
		return []byte("CommentLineTerminator")
	case StringToken:
		return []byte("String")
	case TemplateToken:
		return []byte("Template")
	case TemplateStartToken:
		return []byte("TemplateStart")
	case TemplateMiddleToken:
		return []byte("TemplateMiddle")
	case TemplateEndToken:
		return []byte("TemplateEnd")
	case RegExpToken:
		return []byte("RegExp")
	case PrivateIdentifierToken:
		return []byte("PrivateIdentifier")
	case NumericToken:
		return []byte("Numeric")
	case DecimalToken:
		return []byte("Decimal")
	case BinaryToken:
		return []byte("Binary")
	case OctalToken:
		return []byte("Octal")
	case HexadecimalToken:
		return []byte("Hexadecimal")
	case BigIntToken:
		return []byte("BigInt")
	case PunctuatorToken:
		return []byte("Punctuator")
	case OpenBraceToken:
		return []byte("{")
	case CloseBraceToken:
		return []byte("}")
	case OpenParenToken:
		return []byte("(")
	case CloseParenToken:
		return []byte(")")
	case OpenBracketToken:
		return []byte("[")
	case CloseBracketToken:
		return []byte("]")
	case DotToken:
		return []byte(".")
	case SemicolonToken:
		return []byte(";")
	case CommaToken:
		return []byte(",")
	case QuestionToken:
		return []byte("?")
	case ColonToken:
		return []byte(":")
	case ArrowToken:
		return []byte("=>")
	case EllipsisToken:
		return []byte("...")
	}
	return nil
}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/util.go b/vendor/github.com/tdewolff/parse/v2/js/util.go
new file mode 100644
index 0000000..78a629c
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/util.go
@@ -0,0 +1,38 @@
+package js
+
+// AsIdentifierName returns true if a valid identifier name is given.
+func AsIdentifierName(b []byte) bool {
+ if len(b) == 0 || !identifierStartTable[b[0]] {
+ return false
+ }
+
+ i := 1
+ for i < len(b) {
+ if identifierTable[b[i]] {
+ i++
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
// AsDecimalLiteral returns true if a valid decimal literal is given: digits
// with at most one dot, optionally starting or ending with the dot (but not a
// lone dot). A literal starting with '0' is only valid as exactly "0".
func AsDecimalLiteral(b []byte) bool {
	if len(b) == 0 {
		return false
	}
	isDigit := func(c byte) bool { return '0' <= c && c <= '9' }
	if !isDigit(b[0]) && (b[0] != '.' || len(b) == 1) {
		return false
	}
	if b[0] == '0' {
		return len(b) == 1
	}
	pos := 1
	for pos < len(b) && isDigit(b[pos]) {
		pos++
	}
	if pos < len(b) && b[pos] == '.' && b[0] != '.' {
		pos++
		for pos < len(b) && isDigit(b[pos]) {
			pos++
		}
	}
	return pos == len(b)
}
diff --git a/vendor/github.com/tdewolff/parse/v2/js/walk.go b/vendor/github.com/tdewolff/parse/v2/js/walk.go
new file mode 100644
index 0000000..5c94473
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/js/walk.go
@@ -0,0 +1,288 @@
+package js
+
// IVisitor represents the AST Visitor
// Each INode encountered by `Walk` is passed to `Enter`, children nodes will be ignored if the returned IVisitor is nil
// `Exit` is called upon the exit of a node
type IVisitor interface {
	// Enter is called before a node's children are walked; returning nil
	// prunes the subtree. The returned visitor is used for the children.
	Enter(n INode) IVisitor
	// Exit is called after all of a node's children have been walked.
	Exit(n INode)
}
+
// Walk traverses an AST in depth-first order
// NOTE(review): for several node kinds the children are not visited in source
// order (e.g. a statement's Body is walked before its Cond); confirm callers
// do not rely on source-order traversal.
func Walk(v IVisitor, n INode) {
	if n == nil {
		return
	}

	// a nil visitor returned by Enter prunes this subtree
	if v = v.Enter(n); v == nil {
		return
	}

	// Exit runs after all children below have been walked
	defer v.Exit(n)

	switch n := n.(type) {
	case *AST:
		Walk(v, &n.BlockStmt)
	case *Var:
		// leaf node
		return
	case *BlockStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, n.List[i])
			}
		}
	case *EmptyStmt:
		return
	case *ExprStmt:
		Walk(v, n.Value)
	case *IfStmt:
		Walk(v, n.Body)
		Walk(v, n.Else)
		Walk(v, n.Cond)
	case *DoWhileStmt:
		Walk(v, n.Body)
		Walk(v, n.Cond)
	case *WhileStmt:
		Walk(v, n.Body)
		Walk(v, n.Cond)
	case *ForStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}

		Walk(v, n.Init)
		Walk(v, n.Cond)
		Walk(v, n.Post)
	case *ForInStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}

		Walk(v, n.Init)
		Walk(v, n.Value)
	case *ForOfStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}

		Walk(v, n.Init)
		Walk(v, n.Value)
	case *CaseClause:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, n.List[i])
			}
		}

		Walk(v, n.Cond)
	case *SwitchStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}

		Walk(v, n.Init)
	case *BranchStmt:
		return
	case *ReturnStmt:
		Walk(v, n.Value)
	case *WithStmt:
		Walk(v, n.Body)
		Walk(v, n.Cond)
	case *LabelledStmt:
		Walk(v, n.Value)
	case *ThrowStmt:
		Walk(v, n.Value)
	case *TryStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}

		if n.Catch != nil {
			Walk(v, n.Catch)
		}

		if n.Finally != nil {
			Walk(v, n.Finally)
		}

		Walk(v, n.Binding)
	case *DebuggerStmt:
		return
	case *Alias:
		return
	case *ImportStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *ExportStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}

		Walk(v, n.Decl)
	case *DirectivePrologueStmt:
		return
	case *PropertyName:
		Walk(v, &n.Literal)
		Walk(v, n.Computed)
	case *BindingArray:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}

		Walk(v, n.Rest)
	case *BindingObjectItem:
		if n.Key != nil {
			Walk(v, n.Key)
		}

		Walk(v, &n.Value)
	case *BindingObject:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}

		if n.Rest != nil {
			Walk(v, n.Rest)
		}
	case *BindingElement:
		Walk(v, n.Binding)
		Walk(v, n.Default)
	case *VarDecl:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *Params:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}

		Walk(v, n.Rest)
	case *FuncDecl:
		Walk(v, &n.Body)
		Walk(v, &n.Params)

		if n.Name != nil {
			Walk(v, n.Name)
		}
	case *MethodDecl:
		Walk(v, &n.Body)
		Walk(v, &n.Params)
		Walk(v, &n.Name)
	case *Field:
		Walk(v, &n.Name)
		Walk(v, n.Init)
	case *ClassDecl:
		if n.Name != nil {
			Walk(v, n.Name)
		}

		Walk(v, n.Extends)

		// each class element is exactly one of: static block, method, field
		for _, item := range n.List {
			if item.StaticBlock != nil {
				Walk(v, item.StaticBlock)
			} else if item.Method != nil {
				Walk(v, item.Method)
			} else {
				Walk(v, &item.Field)
			}
		}
	case *LiteralExpr:
		return
	case *Element:
		Walk(v, n.Value)
	case *ArrayExpr:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *Property:
		if n.Name != nil {
			Walk(v, n.Name)
		}

		Walk(v, n.Value)
		Walk(v, n.Init)
	case *ObjectExpr:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *TemplatePart:
		Walk(v, n.Expr)
	case *TemplateExpr:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}

		Walk(v, n.Tag)
	case *GroupExpr:
		Walk(v, n.X)
	case *IndexExpr:
		Walk(v, n.X)
		Walk(v, n.Y)
	case *DotExpr:
		Walk(v, n.X)
		Walk(v, &n.Y)
	case *NewTargetExpr:
		return
	case *ImportMetaExpr:
		return
	case *Arg:
		Walk(v, n.Value)
	case *Args:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *NewExpr:
		if n.Args != nil {
			Walk(v, n.Args)
		}

		Walk(v, n.X)
	case *CallExpr:
		Walk(v, &n.Args)
		Walk(v, n.X)
	case *UnaryExpr:
		Walk(v, n.X)
	case *BinaryExpr:
		Walk(v, n.X)
		Walk(v, n.Y)
	case *CondExpr:
		Walk(v, n.Cond)
		Walk(v, n.X)
		Walk(v, n.Y)
	case *YieldExpr:
		Walk(v, n.X)
	case *ArrowFunc:
		Walk(v, &n.Body)
		Walk(v, &n.Params)
	case *CommaExpr:
		for _, item := range n.List {
			Walk(v, item)
		}
	default:
		// unknown node kinds are ignored
		return
	}
}
diff --git a/vendor/github.com/tdewolff/parse/v2/position.go b/vendor/github.com/tdewolff/parse/v2/position.go
new file mode 100644
index 0000000..38e38ce
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/position.go
@@ -0,0 +1,95 @@
+package parse
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unicode"
+)
+
// Position returns the line and column number for a certain position in a file. It is useful for recovering the position in a file that caused an error.
// It treats \n, \r, \r\n, \u2028, and \u2029 as newlines; \f is NOT treated as
// a newline, which might differ from some languages.
// Lines and columns are 1-based; col counts runes, not bytes.
func Position(r io.Reader, offset int) (line, col int, context string) {
	l := NewInput(r)
	line = 1
	for l.Pos() < offset {
		c := l.Peek(0)
		n := 1 // byte length of the current character
		newline := false
		if c == '\n' {
			newline = true
		} else if c == '\r' {
			if l.Peek(1) == '\n' {
				// \r\n counts as a single newline
				newline = true
				n = 2
			} else {
				newline = true
			}
		} else if c >= 0xC0 {
			// start byte of a multi-byte UTF-8 sequence
			var r rune
			if r, n = l.PeekRune(0); r == '\u2028' || r == '\u2029' {
				newline = true
			}
		} else if c == 0 && l.Err() != nil {
			break // end of input
		}

		// stop when offset points inside a multi-byte character or \r\n pair
		if 1 < n && offset < l.Pos()+n {
			break
		}
		l.Move(n)

		if newline {
			line++
			// make offset relative to the start of the new line
			offset -= l.Pos()
			l.Skip()
		}
	}

	// the lexeme now spans from the start of the line up to offset
	col = len([]rune(string(l.Lexeme()))) + 1
	context = positionContext(l, line, col)
	return
}
+
// positionContext renders a two-line snippet for (line, col): the line's text,
// trimmed with ellipses to stay around 60 characters, followed by a caret
// marking the column. The input's lexeme must already span the line start up
// to the error position; it is extended here to the end of the line.
func positionContext(l *Input, line, col int) (context string) {
	for {
		c := l.Peek(0)
		if c == 0 && l.Err() != nil || c == '\n' || c == '\r' {
			break
		}
		l.Move(1)
	}
	rs := []rune(string(l.Lexeme()))

	// cut off front or rear of context to stay between 60 characters
	limit := 60
	offset := 20
	ellipsisFront := ""
	ellipsisRear := ""
	if limit < len(rs) {
		if col <= limit-offset {
			// column near the start: keep the front, trim the rear
			ellipsisRear = "..."
			rs = rs[:limit-3]
		} else if col >= len(rs)-offset-3 {
			// column near the end: trim the front, shift col accordingly
			ellipsisFront = "..."
			col -= len(rs) - offset - offset - 7
			rs = rs[len(rs)-offset-offset-4:]
		} else {
			// column in the middle: trim both sides, center the caret
			ellipsisFront = "..."
			ellipsisRear = "..."
			rs = rs[col-offset-1 : col+offset]
			col = offset + 4
		}
	}

	// replace unprintable characters by a middle dot
	for i, r := range rs {
		if !unicode.IsGraphic(r) {
			rs[i] = '·'
		}
	}

	context += fmt.Sprintf("%5d: %s%s%s\n", line, ellipsisFront, string(rs), ellipsisRear)
	context += fmt.Sprintf("%s^", strings.Repeat(" ", 6+col))
	return
}
diff --git a/vendor/github.com/tdewolff/parse/v2/strconv/float.go b/vendor/github.com/tdewolff/parse/v2/strconv/float.go
new file mode 100644
index 0000000..c89bdb2
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/strconv/float.go
@@ -0,0 +1,257 @@
+package strconv
+
+import (
+ "math"
+)
+
// float64pow10 holds the powers of ten 1e0 through 1e22; this is the range of
// powers of ten exactly representable in a float64 (same table as the
// standard library's strconv/atof.go fast path). Used by ParseFloat.
var float64pow10 = []float64{
	1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
	1e20, 1e21, 1e22,
}
+
// ParseFloat parses a byte-slice and returns the float it represents.
// If an invalid character is encountered, it will stop there.
// The second return value is the number of bytes consumed; it is zero when no
// valid number was found at the start of b.
func ParseFloat(b []byte) (float64, int) {
	i := 0
	neg := false
	if i < len(b) && (b[i] == '+' || b[i] == '-') {
		neg = b[i] == '-'
		i++
	}
	start := i
	dot := -1   // index of the decimal dot, -1 while none seen
	trunk := -1 // index of the first digit that no longer fits the uint64 mantissa, -1 while all fit
	n := uint64(0)
	for ; i < len(b); i++ {
		c := b[i]
		if c >= '0' && c <= '9' {
			if trunk == -1 {
				if n > math.MaxUint64/10 {
					// mantissa full: remember where digits started being dropped
					trunk = i
				} else {
					n *= 10
					n += uint64(c - '0')
				}
			}
		} else if dot == -1 && c == '.' {
			dot = i
		} else {
			break
		}
	}
	// reject empty input and a lone dot
	if i == start || i == start+1 && dot == start {
		return 0.0, 0
	}

	f := float64(n)
	if neg {
		f = -f
	}

	// mantExp corrects for the decimal point: the number of fractional digits
	// kept in n (dot present), or minus the number of integer digits dropped
	// past the mantissa (dot absent)
	mantExp := int64(0)
	if dot != -1 {
		if trunk == -1 {
			trunk = i
		}
		mantExp = int64(trunk - dot - 1)
	} else if trunk != -1 {
		mantExp = int64(trunk - i)
	}
	// optional exponent; roll back if 'e'/'E' is not followed by an integer
	expExp := int64(0)
	if i < len(b) && (b[i] == 'e' || b[i] == 'E') {
		startExp := i
		i++
		if e, expLen := ParseInt(b[i:]); expLen > 0 {
			expExp = e
			i += expLen
		} else {
			i = startExp
		}
	}
	exp := expExp - mantExp

	// copied from strconv/atof.go
	if exp == 0 {
		return f, i
	} else if exp > 0 && exp <= 15+22 { // int * 10^k
		// If exponent is big but number of digits is not,
		// can move a few zeros into the integer part.
		if exp > 22 {
			f *= float64pow10[exp-22]
			exp = 22
		}
		if f <= 1e15 && f >= -1e15 {
			return f * float64pow10[exp], i
		}
	} else if exp < 0 && exp >= -22 { // int / 10^k
		return f / float64pow10[-exp], i
	}
	// slow but general fallback for exponents outside the exact range
	f *= math.Pow10(int(-mantExp))
	return f * math.Pow10(int(expExp)), i
}
+
// log2 is log10(2), used to convert a binary exponent to a decimal one.
const log2 = 0.3010299956639812

// float64exp returns an estimate of the decimal exponent of f, derived from
// the raw IEEE-754 binary exponent (the raw field is used directly, so
// subnormals are treated as having the minimum exponent).
func float64exp(f float64) int {
	if f == 0.0 {
		return 0
	}
	bits := math.Float64bits(f)
	exp2 := int(bits>>52)&0x7FF - 1022 // unbiased exponent + 1
	e := float64(exp2) * log2
	if e < 0 {
		// round toward negative infinity
		return int(e - 1.0)
	}
	return int(e)
}
+
// AppendFloat appends a float to `b` with precision `prec`. It returns the new slice and whether successful or not. Precision is the number of decimals to display, thus prec + 1 == number of significant digits.
// NaN and infinities cannot be formatted and return b unchanged with false.
func AppendFloat(b []byte, f float64, prec int) ([]byte, bool) {
	if math.IsNaN(f) || math.IsInf(f, 0) {
		return b, false
	}

	neg := false
	if f < 0.0 {
		f = -f
		neg = true
	}
	if prec < 0 || 17 < prec {
		prec = 17 // maximum number of significant digits in double
	}
	prec -= float64exp(f) // number of digits in front of the dot
	f *= math.Pow10(prec)

	// calculate mantissa and exponent
	mant := int64(f)
	mantLen := LenInt(mant)
	mantExp := mantLen - prec - 1
	if mant == 0 {
		return append(b, '0'), true
	}

	// expLen is zero for positive exponents, because positive exponents are determined later on in the big conversion loop
	exp := 0
	expLen := 0
	if mantExp > 0 {
		// positive exponent is determined in the loop below
		// but if we initially decreased the exponent to fit in an integer, we can't set the new exponent in the loop alone,
		// since the number of zeros at the end determines the positive exponent in the loop, and we just artificially lost zeros
		if prec < 0 {
			exp = mantExp
		}
		expLen = 1 + LenInt(int64(exp)) // e + digits
	} else if mantExp < -3 {
		exp = mantExp
		expLen = 2 + LenInt(int64(exp)) // e + minus + digits
	} else if mantExp < -1 {
		mantLen += -mantExp - 1 // extra zero between dot and first digit
	}

	// reserve space in b
	i := len(b)
	maxLen := 1 + mantLen + expLen // dot + mantissa digits + exponent
	if neg {
		maxLen++
	}
	if i+maxLen > cap(b) {
		b = append(b, make([]byte, maxLen)...)
	} else {
		b = b[:i+maxLen]
	}

	// write to string representation
	if neg {
		b[i] = '-'
		i++
	}

	// big conversion loop, start at the end and move to the front
	// initially print trailing zeros and remove them later on
	// for example if the first non-zero digit is three positions in front of the dot, it will overwrite the zeros with a positive exponent
	zero := true
	last := i + mantLen      // right-most position of digit that is non-zero + dot
	dot := last - prec - exp // position of dot
	j := last
	for mant > 0 {
		if j == dot {
			b[j] = '.'
			j--
		}
		newMant := mant / 10
		digit := mant - 10*newMant
		if zero && digit > 0 {
			// first non-zero digit, if we are still behind the dot we can trim the end to this position
			// otherwise trim to the dot (including the dot)
			if j > dot {
				i = j + 1
				// decrease negative exponent further to get rid of dot
				if exp < 0 {
					newExp := exp - (j - dot)
					// getting rid of the dot shouldn't lower the exponent to more digits (e.g. -9 -> -10)
					if LenInt(int64(newExp)) == LenInt(int64(exp)) {
						exp = newExp
						dot = j
						j--
						i--
					}
				}
			} else {
				i = dot
			}
			last = j
			zero = false
		}
		b[j] = '0' + byte(digit)
		j--
		mant = newMant
	}

	if j > dot {
		// extra zeros behind the dot
		for j > dot {
			b[j] = '0'
			j--
		}
		b[j] = '.'
	} else if last+3 < dot {
		// add positive exponent because we have 3 or more zeros in front of the dot
		i = last + 1
		exp = dot - last - 1
	} else if j == dot {
		// handle 0.1
		b[j] = '.'
	}

	// exponent: 1 or 2 means append that many literal zeros instead of "eN"
	if exp != 0 {
		if exp == 1 {
			b[i] = '0'
			i++
		} else if exp == 2 {
			b[i] = '0'
			b[i+1] = '0'
			i += 2
		} else {
			b[i] = 'e'
			i++
			if exp < 0 {
				b[i] = '-'
				i++
				exp = -exp
			}
			// write exponent digits right to left
			i += LenInt(int64(exp))
			j := i
			for exp > 0 {
				newExp := exp / 10
				digit := exp - 10*newExp
				j--
				b[j] = '0' + byte(digit)
				exp = newExp
			}
		}
	}
	return b[:i], true
}
diff --git a/vendor/github.com/tdewolff/parse/v2/strconv/int.go b/vendor/github.com/tdewolff/parse/v2/strconv/int.go
new file mode 100644
index 0000000..e3483bd
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/strconv/int.go
@@ -0,0 +1,108 @@
+package strconv
+
+import (
+ "math"
+)
+
// ParseInt parses a byte-slice and returns the integer it represents.
// If an invalid character is encountered, it will stop there.
// The second return value is the number of bytes consumed; (0, 0) is returned
// when no digits are present or the value overflows an int64.
//
// Fixed: the overflow check previously ran before the digit test, so a valid
// in-range number followed by a non-digit (e.g. "2000000000000000000x") was
// rejected, and n*10+digit could silently wrap for 20-digit inputs.
func ParseInt(b []byte) (int64, int) {
	i := 0
	neg := false
	if len(b) > 0 && (b[0] == '+' || b[0] == '-') {
		neg = b[0] == '-'
		i++
	}
	start := i
	n := uint64(0)
	for i < len(b) {
		c := b[i]
		if c < '0' || '9' < c {
			break
		}
		// exact overflow check for n = n*10 + digit over uint64
		if n > math.MaxUint64/10 || n == math.MaxUint64/10 && c-'0' > math.MaxUint64%10 {
			return 0, 0
		}
		n = n*10 + uint64(c-'0')
		i++
	}
	if i == start {
		return 0, 0
	}
	// reject values outside int64 range (one extra for -MinInt64)
	if !neg && n > uint64(math.MaxInt64) || n > uint64(math.MaxInt64)+1 {
		return 0, 0
	}
	if neg {
		return -int64(n), i
	}
	return int64(n), i
}
+
// ParseUint parses a byte-slice and returns the integer it represents.
// If an invalid character is encountered, it will stop there.
// The second return value is the number of bytes consumed; (0, 0) is returned
// when no digits are present or the value overflows a uint64.
//
// Fixed: the overflow check previously ran before the digit test, so a valid
// in-range number followed by a non-digit (e.g. "2000000000000000000x") was
// rejected, and n*10+digit could silently wrap for 20-digit inputs.
func ParseUint(b []byte) (uint64, int) {
	i := 0
	n := uint64(0)
	for i < len(b) {
		c := b[i]
		if c < '0' || '9' < c {
			break
		}
		// exact overflow check for n = n*10 + digit over uint64
		if n > math.MaxUint64/10 || n == math.MaxUint64/10 && c-'0' > math.MaxUint64%10 {
			return 0, 0
		}
		n = n*10 + uint64(c-'0')
		i++
	}
	return n, i
}
+
// LenInt returns the number of decimal digits needed to write i.
// The minus sign of a negative value is NOT counted.
func LenInt(i int64) int {
	if i < 0 {
		if i == -9223372036854775808 {
			// MinInt64 cannot be negated; it has 19 digits
			return 19
		}
		i = -i
	}
	digits := 1
	for 10 <= i {
		i /= 10
		digits++
	}
	return digits
}
diff --git a/vendor/github.com/tdewolff/parse/v2/strconv/price.go b/vendor/github.com/tdewolff/parse/v2/strconv/price.go
new file mode 100644
index 0000000..94b3834
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/strconv/price.go
@@ -0,0 +1,83 @@
+package strconv
+
// AppendPrice will append an int64 formatted as a price, where the int64 is the price in cents.
// It does not display whether a price is negative or not.
// milSeparator is inserted every three integer digits; decSeparator precedes
// the two decimal digits, which are printed only when dec is true (otherwise
// the value is rounded to whole units, half away from zero).
func AppendPrice(b []byte, price int64, dec bool, milSeparator byte, decSeparator byte) []byte {
	if price < 0 {
		if price == -9223372036854775808 {
			// math.MinInt64 cannot be negated; splice separators into its
			// precomputed textual form instead
			x := []byte("92 233 720 368 547 758 08")
			x[2] = milSeparator
			x[6] = milSeparator
			x[10] = milSeparator
			x[14] = milSeparator
			x[18] = milSeparator
			x[22] = decSeparator
			return append(b, x...)
		}
		price = -price
	}

	// rounding to whole units when decimals are not displayed
	if !dec {
		firstDec := (price / 10) % 10
		if firstDec >= 5 {
			price += 100
		}
	}

	// calculate size
	n := LenInt(price) - 2 // digits in the integer part
	if n > 0 {
		n += (n - 1) / 3 // mil separator
	} else {
		n = 1 // a lone zero for values below one unit
	}
	if dec {
		n += 2 + 1 // decimals + dec separator
	}

	// resize byte slice
	i := len(b)
	if i+n > cap(b) {
		b = append(b, make([]byte, n)...)
	} else {
		b = b[:i+n]
	}

	// print fractional-part, writing right to left
	i += n - 1
	if dec {
		for j := 0; j < 2; j++ {
			c := byte(price%10) + '0'
			price /= 10
			b[i] = c
			i--
		}
		b[i] = decSeparator
		i--
	} else {
		price /= 100
	}

	if price == 0 {
		b[i] = '0'
		return b
	}

	// print integer-part with a separator every three digits
	j := 0
	for price > 0 {
		if j == 3 {
			b[i] = milSeparator
			i--
			j = 0
		}

		c := byte(price%10) + '0'
		price /= 10
		b[i] = c
		i--
		j++
	}
	return b
}
diff --git a/vendor/github.com/tdewolff/parse/v2/util.go b/vendor/github.com/tdewolff/parse/v2/util.go
new file mode 100644
index 0000000..db706d4
--- /dev/null
+++ b/vendor/github.com/tdewolff/parse/v2/util.go
@@ -0,0 +1,481 @@
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "unicode"
+)
+
+// Copy returns a copy of the given byte slice.
+// The returned slice is freshly allocated, so later mutations of src do not
+// affect it (and vice versa).
+func Copy(src []byte) (dst []byte) {
+	dst = make([]byte, len(src))
+	copy(dst, src)
+	return
+}
+
+// ToLower converts all characters in the byte slice from A-Z to a-z.
+// The conversion is done in place (src is mutated) and only covers the ASCII
+// range; the mutated slice is returned for convenience.
+func ToLower(src []byte) []byte {
+	for i, c := range src {
+		if c >= 'A' && c <= 'Z' {
+			src[i] = c + ('a' - 'A')
+		}
+	}
+	return src
+}
+
+// EqualFold returns true when s matches case-insensitively the targetLower (which must be lowercase).
+// Folding is ASCII-only: each byte of s equals the target byte either directly
+// or after lowercasing an A-Z byte.
+func EqualFold(s, targetLower []byte) bool {
+	if len(s) != len(targetLower) {
+		return false
+	}
+	for i, c := range targetLower {
+		d := s[i]
+		// accept d as-is, or lowercased when it is an uppercase ASCII letter
+		if d != c && (d < 'A' || d > 'Z' || d+('a'-'A') != c) {
+			return false
+		}
+	}
+	return true
+}
+
+// Printable returns a printable string for given rune:
+// graphic runes verbatim, other ASCII as 0xHH, and other runes as U+XXXX.
+func Printable(r rune) string {
+	if unicode.IsGraphic(r) {
+		return fmt.Sprintf("%c", r)
+	} else if r < 128 {
+		return fmt.Sprintf("0x%02X", r)
+	}
+	return fmt.Sprintf("%U", r)
+}
+
+// whitespaceTable maps each byte to whether it is ASCII whitespace:
+// tab, new line, form feed, carriage return, or space.
+var whitespaceTable = [256]bool{
+	// ASCII
+	false, false, false, false, false, false, false, false,
+	false, true, true, false, true, true, false, false, // tab, new line, form feed, carriage return
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	true, false, false, false, false, false, false, false, // space
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	// non-ASCII
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+}
+
+// IsWhitespace returns true for space, \n, \r, \t, \f.
+func IsWhitespace(c byte) bool {
+	return whitespaceTable[c]
+}
+
+// newlineTable maps each byte to whether it is a newline byte (\n or \r).
+var newlineTable = [256]bool{
+	// ASCII
+	false, false, false, false, false, false, false, false,
+	false, false, true, false, false, true, false, false, // new line, carriage return
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	// non-ASCII
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+}
+
+// IsNewline returns true for \n, \r.
+func IsNewline(c byte) bool {
+	return newlineTable[c]
+}
+
+// IsAllWhitespace returns true when the entire byte slice consists of space, \n, \r, \t, \f.
+// The empty slice yields true.
+func IsAllWhitespace(b []byte) bool {
+	for _, c := range b {
+		if !IsWhitespace(c) {
+			return false
+		}
+	}
+	return true
+}
+
+// TrimWhitespace removes any leading and trailing whitespace characters.
+// It returns a subslice of b; no copy is made.
+func TrimWhitespace(b []byte) []byte {
+	n := len(b)
+	start := n
+	// scan forward for the first non-whitespace byte
+	for i := 0; i < n; i++ {
+		if !IsWhitespace(b[i]) {
+			start = i
+			break
+		}
+	}
+	end := n
+	// scan backward (stopping at start) for the last non-whitespace byte
+	for i := n - 1; i >= start; i-- {
+		if !IsWhitespace(b[i]) {
+			end = i + 1
+			break
+		}
+	}
+	return b[start:end]
+}
+
+// ReplaceMultipleWhitespace replaces character series of space, \n, \t, \f, \r into a single space or newline (when the series contained a \n or \r).
+// The input is modified in place; the returned slice may be shorter than b.
+func ReplaceMultipleWhitespace(b []byte) []byte {
+	j, k := 0, 0 // j is write position, k is start of next text section
+	for i := 0; i < len(b); i++ {
+		if IsWhitespace(b[i]) {
+			start := i
+			newline := IsNewline(b[i])
+			i++
+			// consume the whole whitespace run, remembering if it held a newline
+			for ; i < len(b) && IsWhitespace(b[i]); i++ {
+				if IsNewline(b[i]) {
+					newline = true
+				}
+			}
+			// collapse the run into a single '\n' (if any newline was seen) or ' '
+			if newline {
+				b[start] = '\n'
+			} else {
+				b[start] = ' '
+			}
+			if 1 < i-start { // more than one whitespace
+				if j == 0 {
+					j = start + 1 // first compaction: everything before it is already in place
+				} else {
+					j += copy(b[j:], b[k:start+1])
+				}
+				k = i
+			}
+		}
+	}
+	if j == 0 {
+		return b // nothing was compacted
+	} else if j == 1 { // only if starts with whitespace
+		b[k-1] = b[0]
+		return b[k-1:]
+	} else if k < len(b) {
+		j += copy(b[j:], b[k:]) // flush the trailing text section
+	}
+	return b[:j]
+}
+
+// replaceEntities will replace in b at index i, assuming that b[i] == '&' and that i+3<len(b). The returned int will be the last character of the entity, so that the next iteration can safely do i++ to continue and not miss any entities.
+// It handles hexadecimal (&#xHH;), decimal (&#DD;) and named (&name;) entities.
+// entitiesMap maps an entity name (without '&' and ';') to its replacement
+// bytes; revEntitiesMap appears to map a single byte back to a canonical
+// entity encoding of that byte — NOTE(review): confirm against the caller.
+func replaceEntities(b []byte, i int, entitiesMap map[string][]byte, revEntitiesMap map[byte][]byte) ([]byte, int) {
+	const MaxEntityLength = 31 // longest HTML entity: CounterClockwiseContourIntegral
+	var r []byte
+	j := i + 1
+	if b[j] == '#' {
+		j++
+		if b[j] == 'x' {
+			// hexadecimal numeric entity
+			j++
+			c := 0
+			for ; j < len(b) && (b[j] >= '0' && b[j] <= '9' || b[j] >= 'a' && b[j] <= 'f' || b[j] >= 'A' && b[j] <= 'F'); j++ {
+				if b[j] <= '9' {
+					c = c<<4 + int(b[j]-'0')
+				} else if b[j] <= 'F' {
+					c = c<<4 + int(b[j]-'A') + 10
+				} else if b[j] <= 'f' {
+					c = c<<4 + int(b[j]-'a') + 10
+				}
+			}
+			// no hex digits at all, or value >= 10000: leave untouched
+			if j <= i+3 || 10000 <= c {
+				return b, j - 1
+			}
+			if c < 128 {
+				// ASCII: replace by the raw byte
+				r = []byte{byte(c)}
+			} else {
+				// non-ASCII: re-encode as a decimal numeric entity
+				r = append(r, '&', '#')
+				r = strconv.AppendInt(r, int64(c), 10)
+				r = append(r, ';')
+			}
+		} else {
+			// decimal numeric entity; only ASCII values (< 128) are replaced
+			c := 0
+			for ; j < len(b) && c < 128 && b[j] >= '0' && b[j] <= '9'; j++ {
+				c = c*10 + int(b[j]-'0')
+			}
+			if j <= i+2 || 128 <= c {
+				return b, j - 1
+			}
+			r = []byte{byte(c)}
+		}
+	} else {
+		// named entity: scan up to the terminating semicolon (bounded length)
+		for ; j < len(b) && j-i-1 <= MaxEntityLength && b[j] != ';'; j++ {
+		}
+		if j <= i+1 || len(b) <= j {
+			return b, j - 1
+		}
+
+		var ok bool
+		r, ok = entitiesMap[string(b[i+1:j])]
+		if !ok {
+			return b, j
+		}
+	}
+
+	// j is at semicolon
+	n := j + 1 - i // length of the original entity text, '&' through ';'
+	if j < len(b) && b[j] == ';' && 2 < n {
+		if len(r) == 1 {
+			if q, ok := revEntitiesMap[r[0]]; ok {
+				// the byte must stay encoded; swap in its canonical entity
+				if len(q) == len(b[i:j+1]) && bytes.Equal(q, b[i:j+1]) {
+					return b, j // already in canonical form, nothing to do
+				}
+				r = q
+			} else if r[0] == '&' {
+				// check if for example &amp; is followed by something that could potentially be an entity
+				k := j + 1
+				if k < len(b) && (b[k] >= '0' && b[k] <= '9' || b[k] >= 'a' && b[k] <= 'z' || b[k] >= 'A' && b[k] <= 'Z' || b[k] == '#') {
+					return b, k
+				}
+			}
+		}
+
+		// splice r over b[i:j+1] and shrink b by the size difference
+		copy(b[i:], r)
+		copy(b[i+len(r):], b[j+1:])
+		b = b[:len(b)-n+len(r)]
+		return b, i + len(r) - 1
+	}
+	return b, i
+}
+
+// ReplaceEntities replaces all occurrences of entities (such as &quot;) to their respective unencoded bytes.
+// The replacement is done in place; the returned slice may be shorter than b.
+func ReplaceEntities(b []byte, entitiesMap map[string][]byte, revEntitiesMap map[byte][]byte) []byte {
+	for i := 0; i < len(b); i++ {
+		// i+3 < len(b) is the minimum room for an entity, required by replaceEntities
+		if b[i] == '&' && i+3 < len(b) {
+			b, i = replaceEntities(b, i, entitiesMap, revEntitiesMap)
+		}
+	}
+	return b
+}
+
+// ReplaceMultipleWhitespaceAndEntities is a combination of ReplaceMultipleWhitespace and ReplaceEntities. It is faster than executing both sequentially.
+// The input is modified in place; the returned slice may be shorter than b.
+func ReplaceMultipleWhitespaceAndEntities(b []byte, entitiesMap map[string][]byte, revEntitiesMap map[byte][]byte) []byte {
+	j, k := 0, 0 // j is write position, k is start of next text section
+	for i := 0; i < len(b); i++ {
+		if IsWhitespace(b[i]) {
+			start := i
+			newline := IsNewline(b[i])
+			i++
+			// consume the whole whitespace run, remembering if it held a newline
+			for ; i < len(b) && IsWhitespace(b[i]); i++ {
+				if IsNewline(b[i]) {
+					newline = true
+				}
+			}
+			// collapse the run into a single '\n' (if any newline was seen) or ' '
+			if newline {
+				b[start] = '\n'
+			} else {
+				b[start] = ' '
+			}
+			if 1 < i-start { // more than one whitespace
+				if j == 0 {
+					j = start + 1 // first compaction: everything before it is already in place
+				} else {
+					j += copy(b[j:], b[k:start+1])
+				}
+				k = i
+			}
+		}
+		// entity replacement on the same pass, same guard as ReplaceEntities
+		if i+3 < len(b) && b[i] == '&' {
+			b, i = replaceEntities(b, i, entitiesMap, revEntitiesMap)
+		}
+	}
+	if j == 0 {
+		return b // nothing was compacted
+	} else if j == 1 { // only if starts with whitespace
+		b[k-1] = b[0]
+		return b[k-1:]
+	} else if k < len(b) {
+		j += copy(b[j:], b[k:]) // flush the trailing text section
+	}
+	return b[:j]
+}
+
+// URLEncodingTable is a charmap for which characters need escaping in the URL encoding scheme.
+// true means the byte must be percent-escaped by EncodeURL.
+var URLEncodingTable = [256]bool{
+	// ASCII
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, false, true, true, true, true, true, false, // space, ", #, $, %, &
+	false, false, false, true, true, false, false, true, // +, comma, /
+	false, false, false, false, false, false, false, false,
+	false, false, true, true, true, true, true, true, // :, ;, <, =, >, ?
+
+	true, false, false, false, false, false, false, false, // @
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, true, true, true, true, false, // [, \, ], ^
+
+	true, false, false, false, false, false, false, false, // `
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, true, true, true, false, true, // {, |, }, DEL
+
+	// non-ASCII
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+}
+
+// DataURIEncodingTable is a charmap for which characters need escaping in the Data URI encoding scheme.
+// true means the byte must be percent-escaped by EncodeURL.
+// Escape only non-printable characters, unicode and %, #, &.
+// IE11 additionally requires encoding of \, [, ], ", <, >, `, {, }, |, ^ which is not required by Chrome, Firefox, Opera, Edge, Safari, Yandex
+// To pass the HTML validator, restricted URL characters must be escaped: non-printable characters, space, <, >, #, %, "
+var DataURIEncodingTable = [256]bool{
+	// ASCII
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, false, true, true, false, true, true, false, // space, ", #, %, &
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, true, false, true, false, // <, >
+
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, true, true, true, true, false, // [, \, ], ^
+
+	true, false, false, false, false, false, false, false, // `
+	false, false, false, false, false, false, false, false,
+	false, false, false, false, false, false, false, false,
+	false, false, false, true, true, true, false, true, // {, |, }, DEL
+
+	// non-ASCII
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+	true, true, true, true, true, true, true, true,
+}
+
+// EncodeURL encodes bytes using the URL encoding scheme.
+// table marks which bytes must be percent-escaped; escaping is done in place
+// by growing b and shifting the tail right by two bytes per escape.
+func EncodeURL(b []byte, table [256]bool) []byte {
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+		if table[c] {
+			b = append(b, 0, 0) // make room for the two hex digits
+			copy(b[i+3:], b[i+1:])
+			b[i+0] = '%'
+			b[i+1] = "0123456789ABCDEF"[c>>4]
+			b[i+2] = "0123456789ABCDEF"[c&15]
+		}
+	}
+	return b
+}
+
+// DecodeURL decodes a URL encoded using the URL encoding scheme.
+// %XX escapes are decoded in place only for ASCII values (< 128); malformed or
+// non-ASCII escapes are left untouched. '+' is replaced with a space.
+func DecodeURL(b []byte) []byte {
+	for i := 0; i < len(b); i++ {
+		if b[i] == '%' && i+2 < len(b) {
+			j := i + 1
+			c := 0
+			// parse up to two hex digits following the '%'
+			for ; j < i+3 && (b[j] >= '0' && b[j] <= '9' || b[j] >= 'a' && b[j] <= 'f' || b[j] >= 'A' && b[j] <= 'F'); j++ {
+				if b[j] <= '9' {
+					c = c<<4 + int(b[j]-'0')
+				} else if b[j] <= 'F' {
+					c = c<<4 + int(b[j]-'A') + 10
+				} else if b[j] <= 'f' {
+					c = c<<4 + int(b[j]-'a') + 10
+				}
+			}
+			// only replace when both digits parsed and the value is ASCII
+			if j == i+3 && c < 128 {
+				b[i] = byte(c)
+				b = append(b[:i+1], b[i+3:]...)
+			}
+		} else if b[i] == '+' {
+			b[i] = ' '
+		}
+	}
+	return b
+}