diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/.gitignore b/vendor/github.com/DavidBelicza/TextRank/v2/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f83ccd424b77b7b57714c1bff2bea48476438ecb --- /dev/null +++ b/vendor/github.com/DavidBelicza/TextRank/v2/.gitignore @@ -0,0 +1,6 @@ +/.vscode +/.idea +/pkg +/bin +/install.sh +/vendor diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/.travis.yml b/vendor/github.com/DavidBelicza/TextRank/v2/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..899b6a33f461703928809c884db7e62b022a66ea --- /dev/null +++ b/vendor/github.com/DavidBelicza/TextRank/v2/.travis.yml @@ -0,0 +1,10 @@ +language: go +sudo: false + +matrix: + include: + - go: "1.15" + +script: + - go mod vendor + - go test ./... diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/Dockerfile b/vendor/github.com/DavidBelicza/TextRank/v2/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..d31839ac3d38fd7ac7bfdac834e76b870b4a340d --- /dev/null +++ b/vendor/github.com/DavidBelicza/TextRank/v2/Dockerfile @@ -0,0 +1,9 @@ +FROM golang:1.15 +MAINTAINER David Belicza + +ADD ./ /go/src/github.com/DavidBelicza/TextRank + +WORKDIR /go/src/github.com/DavidBelicza/TextRank + +CMD go mod vendor +CMD /bin/bash diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/LICENSE b/vendor/github.com/DavidBelicza/TextRank/v2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..960d66b5d17f209aac95d0bd06cb631dc8e8e91f --- /dev/null +++ b/vendor/github.com/DavidBelicza/TextRank/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2018 David Belicza + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/README.md b/vendor/github.com/DavidBelicza/TextRank/v2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7b17b6438deb1bca4746e13c77f51a54559fb32a --- /dev/null +++ b/vendor/github.com/DavidBelicza/TextRank/v2/README.md @@ -0,0 +1,543 @@ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+This source code is an implementation of the TextRank algorithm, under the MIT licence.
+
The minimum required Go version is 1.8.
+
+ Hello World
+
+## MOTIVATION
+
+If there was a program that could rank a book-size text's words, phrases and sentences continuously on multiple threads, and it would be open to modification by objects, written in a simple, secure, static language, and if it were very well documented... Now, here it is.
+
+## FEATURES
+
+* Find the most important phrases.
+* Find the most important words.
+* Find the most important N sentences.
+ * Importance by phrase weights.
+ * Importance by word occurrence.
+* Find the first N sentences, start from Xth sentence.
+* Find sentences by phrase chains ordered by position in text.
+* Access to the whole ranked data.
+* Support more languages.
+* Algorithm for weighting can be modified by interface implementation.
+* Parser can be modified by interface implementation.
+* Multi thread support.
+
+## INSTALL
+
+You can install TextRank by Go's get:
+
+```go get github.com/DavidBelicza/TextRank```
+
+TextRank uses the default Go *mod* as vendoring tool, so you can install the dependencies with this command:
+
+```go mod vendor```
+
+## DOCKER
+
+Using Docker with TextRank isn't necessary, it's just an option.
+
+Build image from the repository's root directory:
+
+```docker build -t go_text_rank_image .```
+
+Create container from the image:
+
+```docker run -dit --name textrank go_text_rank_image:latest```
+
+Run the **go test -v .** code inside the container:
+
+```docker exec -i -t textrank go test -v .```
+
+Stop, start or remove the container:
+
+* ```docker stop textrank```
+* ```docker start textrank```
+* ```docker rm textrank```
+
+## HOW DOES IT WORK
+
+To see how it works, the easiest way is to use the sample text. Sample text can be found in the [textrank_test.go file at this line](https://github.com/DavidBelicza/TextRank/blob/master/textrank_test.go#L12). It's a short text about Gnome Shell.
+
+* TextRank reads the text,
+ * parse it,
+ * remove the unnecessary stop words,
+ * tokenize it
+* and counting the occurrence of the words and phrases
+* and then it starts weighting
+ * by the occurrence of words and phrases and their relations.
+* After weighting is done, TextRank normalizes the weights to between 0 and 1.
+* Then the different finder methods capable to find the most important words, phrases or sentences.
+
+The most important phrases from the sample text are:
+
+Phrase | Occurrence | Weight
+--- | --- | ---
+gnome - shell | 5 | 1
+extension - gnome | 3 | 0.50859946
+icons - tray | 3 | 0.49631447
+gnome - caffeine | 2 | 0.27027023
+
+The **gnome** is the most often used word in this text and **shell** is also used multiple times. Two of them are used together as a phrase 5 times. This is the highest occurrence in this text, so this is the most important phrase.
+
+The following two important phrases have the same occurrence, 3; however, they are not equal. This is because the **extension gnome** phrase contains the word **gnome**, the most popular word in the text, and it increases the phrase's weight. It increases the weight of any word that is related to it, but not by so much that it overcomes other important phrases that don't contain the **gnome** word.
+
+The exact algorithm can be found in the [algorithm.go file at this line](https://github.com/DavidBelicza/TextRank/blob/master/rank/algorithm.go#L65).
+
+## TEXTRANK OR AUTOMATIC SUMMARIZATION
+> Automatic summarization is the process of reducing a text document with a computer program in order to create a summary that retains the most important points of the original document. Technologies that can make a coherent summary take into account variables such as length, writing style and syntax. Automatic data summarization is part of machine learning and data mining. The main idea of summarization is to find a representative subset of the data, which contains the information of the entire set. Summarization technologies are used in a large number of sectors in industry today. - Wikipedia
+
+## EXAMPLES
+
+### Find the most important phrases
+
+This is the most basic and simplest usage of textrank.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+}
+```
+
+### All possible pre-defined finder queries
+
+After ranking, the graph contains a lot of valuable data. There are functions in the textrank package that contain logic to retrieve that data from the graph.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases order by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+
+ // Get all words order by weight.
+ words := textrank.FindSingleWords(tr)
+ // Most important word.
+ fmt.Println(words[0])
+
+ // Get the most important 10 sentences. Importance by phrase weights.
+ sentences := textrank.FindSentencesByRelationWeight(tr, 10)
+ // Found sentences
+ fmt.Println(sentences)
+
+ // Get the most important 10 sentences. Importance by word occurrence.
+ sentences = textrank.FindSentencesByWordQtyWeight(tr, 10)
+ // Found sentences
+ fmt.Println(sentences)
+
+ // Get the first 10 sentences, start from 5th sentence.
+ sentences = textrank.FindSentencesFrom(tr, 5, 10)
+ // Found sentences
+ fmt.Println(sentences)
+
+ // Get sentences by phrase/word chains order by position in text.
+ sentencesPh := textrank.FindSentencesByPhraseChain(tr, []string{"gnome", "shell", "extension"})
+ // Found sentence.
+ fmt.Println(sentencesPh[0])
+}
+```
+
+### Access to everything
+
+After ranking, the graph contains a lot of valuable data. The GetRank function allows access to the graph and every data can be retrieved from this structure.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get the rank graph.
+ rankData := tr.GetRankData()
+
+ // Get word ID by token/word.
+ wordId := rankData.WordValID["gnome"]
+
+ // Word's weight.
+ fmt.Println(rankData.Words[wordId].Weight)
+ // Word's quantity/occurrence.
+ fmt.Println(rankData.Words[wordId].Qty)
+ // All sentences what contain the this word.
+ fmt.Println(rankData.Words[wordId].SentenceIDs)
+ // All other words what are related to this word on left side.
+ fmt.Println(rankData.Words[wordId].ConnectionLeft)
+ // All other words what are related to this word on right side.
+ fmt.Println(rankData.Words[wordId].ConnectionRight)
+ // The node of this word, it contains the related words and the relation weight.
+ fmt.Println(rankData.Relation.Node[wordId])
+}
+```
+
+### Adding text continuously
+
+It is possible to add more text after other texts have already been added. The Ranking function can merge these multiple texts and recalculate the weights and all related data.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ rawText2 := "Another book or article..."
+ rawText3 := "Third another book or article..."
+
+ // Add text to the previously added text.
+ tr.Populate(rawText2, language, rule)
+ // Add text to the previously added text.
+ tr.Populate(rawText3, language, rule)
+ // Run the ranking to the whole composed text.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+}
+```
+
+### Using different algorithm to ranking text
+
+There are two algorithms implemented; it is possible to write a custom algorithm via the Algorithm interface and use it instead of the defaults.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Using a little bit more complex algorithm to ranking text.
+ algorithmChain := textrank.NewChainAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmChain)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+}
+```
+
+### Using multiple graphs
+
+A graph ID exists because it is possible to run multiple independent text ranking processes.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // 1st TextRank object
+ tr1 := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr1.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr1.Ranking(algorithmDef)
+
+ // 2nd TextRank object
+ tr2 := textrank.NewTextRank()
+
+ // Using a little bit more complex algorithm to ranking text.
+ algorithmChain := textrank.NewChainAlgorithm()
+
+ // Add text to the second graph.
+ tr2.Populate(rawText, language, rule)
+ // Run the ranking on the second graph.
+ tr2.Ranking(algorithmChain)
+
+ // Get all phrases by weight from first graph.
+ rankedPhrases := textrank.FindPhrases(tr1)
+
+ // Most important phrase from first graph.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase from first graph.
+ fmt.Println(rankedPhrases[1])
+
+ // Get all phrases by weight from second graph.
+ rankedPhrases2 := textrank.FindPhrases(tr2)
+
+ // Most important phrase from second graph.
+ fmt.Println(rankedPhrases2[0])
+ // Second important phrase from second graph.
+ fmt.Println(rankedPhrases2[1])
+}
+```
+
+### Using different non-English languages
+
+English is used by default, but it is possible to add any language. To use other languages, a stop word list is required, which you can find here: https://github.com/stopwords-iso
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+
+ // Add Spanish stop words (just some example).
+ language.SetWords("es", []string{"uno", "dos", "tres", "yo", "es", "eres"})
+ // Active the Spanish.
+ language.SetActiveLanguage("es")
+
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+}
+```
+
+### Asynchronous usage by goroutines
+
+It is thread safe. Independent graphs can receive texts at the same time and can be extended with more text, also at the same time.
+
+```go
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/DavidBelicza/TextRank/v2"
+)
+
+func main() {
+ // A flag when program has to stop.
+ stopProgram := false
+ // Channel.
+ stream := make(chan string)
+ // TextRank object.
+ tr := textrank.NewTextRank()
+
+ // Open new thread/routine
+ go func(tr *textrank.TextRank) {
+ // 3 texts.
+ rawTexts := []string{
+ "Very long text...",
+ "Another very long text...",
+ "Second another very long text...",
+ }
+
+ // Add 3 texts to the stream channel, one by one.
+ for _, rawText := range rawTexts {
+ stream <- rawText
+ }
+ }(tr)
+
+ // Open new thread/routine
+ go func() {
+ // Counter how many times texts added to the ranking.
+ i := 1
+
+ for {
+ // Get text from stream channel when it got a new one.
+ rawText := <-stream
+
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithm := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithm)
+
+ // Set stopProgram flag to true when all 3 text have been added.
+ if i == 3 {
+ stopProgram = true
+ }
+
+ i++
+ }
+ }()
+
+ // The main thread has to run while go-routines run. When stopProgram is
+ // true then the loop has to finish.
+ for !stopProgram {
+ time.Sleep(time.Second * 1)
+ }
+
+ // Most important phrase.
+ phrases := textrank.FindPhrases(tr)
+ // Second important phrase.
+ fmt.Println(phrases[0])
+}
+```
+
+## A SIMPLE VISUAL REPRESENTATION
+
+The image below is a representation of how the simplest text ranking algorithm works. This algorithm can be replaced by another one by injecting a different Algorithm interface implementation.
+
+
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/convert/builder.go b/vendor/github.com/DavidBelicza/TextRank/v2/convert/builder.go
new file mode 100644
index 0000000000000000000000000000000000000000..db94cfc52fe4e56c57ad115b2d00008679dab6c0
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/convert/builder.go
@@ -0,0 +1,43 @@
+package convert
+
+import (
+ "github.com/DavidBelicza/TextRank/v2/parse"
+ "github.com/DavidBelicza/TextRank/v2/rank"
+)
+
+// TextToRank function converts a ParsedSentence object to a Rank object; it is
+// the preparing step for later text ranking. It stores the sentence's original
+// text and feeds each of its words into the rank graph.
+func TextToRank(sentence parse.ParsedSentence, lang Language, ranks *rank.Rank) {
+ // Register the sentence first so its ID can be attached to every word.
+ sentenceId := addSentence(ranks, sentence)
+ addWord(ranks, sentence.GetWords(), lang, sentenceId)
+}
+
+// addWord walks the sentence's words, skips stop words, and records each
+// remaining word in the rank graph together with its neighbour relations.
+// prevWordID starts at -1, meaning "no previous kept word yet".
+func addWord(ranks *rank.Rank, words []string, lang Language, sentenceID int) {
+ prevWordID := -1
+ var curWordID int
+
+ for _, word := range words {
+ if !lang.IsStopWord(word) {
+ // Replace the word with its root form when the language knows one
+ // (stemming hook; LanguageDefault never finds a root — see FindRootWord).
+ if found, rootWord := lang.FindRootWord(word); found {
+ word = rootWord
+ }
+
+ // New words are inserted; known words get their counters updated.
+ if !ranks.IsWordExist(word) {
+ curWordID = ranks.AddNewWord(word, prevWordID, sentenceID)
+ } else {
+ curWordID = ranks.UpdateWord(word, prevWordID, sentenceID)
+ }
+
+ // Link the current word to the previous kept word in the graph.
+ ranks.Relation.AddRelation(curWordID, prevWordID, sentenceID)
+ ranks.UpdateRightConnection(prevWordID, curWordID)
+
+ prevWordID = curWordID
+ }
+ }
+}
+
+// addSentence stores the sentence's original text under the next free index of
+// the sentence map and returns that index as the sentence's ID.
+func addSentence(ranks *rank.Rank, sentence parse.ParsedSentence) int {
+ ranks.SentenceMap[len(ranks.SentenceMap)] = sentence.GetOriginal()
+
+ return len(ranks.SentenceMap) - 1
+}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/convert/language.go b/vendor/github.com/DavidBelicza/TextRank/v2/convert/language.go
new file mode 100644
index 0000000000000000000000000000000000000000..fdad6989640b3c287fbbf83a5fc23bdbf2221c11
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/convert/language.go
@@ -0,0 +1,71 @@
+package convert
+
+import "unicode/utf8"
+
+// Language interface and its methods make possible the polymorphic usage of
+// language specific features (stop-word filtering, stemming) through custom
+// implementations.
+type Language interface {
+ // IsStopWord reports whether the given word should be filtered out.
+ IsStopWord(word string) bool
+ // FindRootWord retrieves (true, root) when a root form of word is known,
+ // otherwise (false, "").
+ FindRootWord(word string) (bool, string)
+ // SetActiveLanguage switches the active language by its code.
+ SetActiveLanguage(code string)
+ // SetWords sets the stop-word list for a language code.
+ SetWords(code string, words []string)
+}
+
+// LanguageDefault struct is the built-in implementation of the Language
+// interface. It stores the stop words of loaded languages and can look up
+// stop words by token.
+type LanguageDefault struct {
+ // defaultLang is the code of the currently active language ("en" initially).
+ defaultLang string
+ // languages maps a language code to its stop-word list.
+ languages map[string][]string
+}
+
+// NewLanguage is the constructor of LanguageDefault. It retrieves a pointer to
+// a LanguageDefault preloaded with the built-in English stop-word list and
+// with "en" set as the active language.
+func NewLanguage() *LanguageDefault {
+ lang := &LanguageDefault{
+ "en",
+ make(map[string][]string),
+ }
+
+ words := getDefaultEnglish()
+
+ lang.SetWords("en", words)
+
+ return lang
+}
+
+// IsStopWord method retrieves true when the given word is in the stop word
+// list of the active language, or when the word is two runes long or shorter
+// (note: the check is <= 2, i.e. one- and two-character words are always
+// filtered).
+func (lang *LanguageDefault) IsStopWord(word string) bool {
+ if utf8.RuneCountInString(word) <= 2 {
+ return true
+ }
+
+ // Linear scan of the active language's stop-word slice.
+ if stopWords, ok := lang.languages[lang.defaultLang]; ok {
+ for _, val := range stopWords {
+ if val == word {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// FindRootWord method gets a word as input, "apples" for example, and should
+// retrieve the root-word of that word, "apple" for example. The first return
+// parameter is true when a word-root has been found, otherwise it is false.
+//
+// NOTE: this default implementation is a stub and always returns (false, ""),
+// i.e. no stemming is performed.
+func (lang *LanguageDefault) FindRootWord(word string) (bool, string) {
+ return false, ""
+}
+
+// SetActiveLanguage method switches between languages by the language's code.
+// The language code is not standardized; it can be any string, typically one
+// previously used with SetWords.
+func (lang *LanguageDefault) SetActiveLanguage(code string) {
+ lang.defaultLang = code
+}
+
+// SetWords method sets (replaces) the stop-word list stored in the
+// LanguageDefault struct under the given language code.
+func (lang *LanguageDefault) SetWords(code string, words []string) {
+ lang.languages[code] = words
+}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/convert/stop_word.go b/vendor/github.com/DavidBelicza/TextRank/v2/convert/stop_word.go
new file mode 100644
index 0000000000000000000000000000000000000000..8977a2dea4296895bf3396d8d56b3f2d40bbc24a
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/convert/stop_word.go
@@ -0,0 +1,332 @@
+package convert
+
+// getDefaultEnglish retrieves the built-in English stop-word list that
+// NewLanguage loads under the "en" code.
+//
+// NOTE(review): "above" appears twice in this list; the duplicate is harmless
+// for IsStopWord's linear scan, but it could be removed upstream.
+func getDefaultEnglish() []string {
+
+ return []string{
+ "a",
+ "about",
+ "above",
+ "above",
+ "across",
+ "after",
+ "afterwards",
+ "again",
+ "against",
+ "all",
+ "almost",
+ "alone",
+ "along",
+ "already",
+ "also",
+ "although",
+ "always",
+ "am",
+ "among",
+ "amongst",
+ "amount",
+ "an",
+ "and",
+ "another",
+ "any",
+ "anyhow",
+ "anyone",
+ "anything",
+ "anyway",
+ "anywhere",
+ "are",
+ "around",
+ "as",
+ "at",
+ "back",
+ "be",
+ "became",
+ "because",
+ "become",
+ "becomes",
+ "becoming",
+ "been",
+ "before",
+ "beforehand",
+ "behind",
+ "being",
+ "below",
+ "beside",
+ "besides",
+ "between",
+ "beyond",
+ "bill",
+ "both",
+ "bottom",
+ "but",
+ "by",
+ "call",
+ "can",
+ "cannot",
+ "cant",
+ "co",
+ "con",
+ "could",
+ "couldn't",
+ "cry",
+ "de",
+ "describe",
+ "detail",
+ "did",
+ "didn't",
+ "do",
+ "does",
+ "doesn't",
+ "done",
+ "don't",
+ "down",
+ "due",
+ "during",
+ "each",
+ "eg",
+ "eight",
+ "either",
+ "eleven",
+ "else",
+ "elsewhere",
+ "empty",
+ "enough",
+ "etc",
+ "even",
+ "ever",
+ "every",
+ "everyone",
+ "everything",
+ "everywhere",
+ "except",
+ "few",
+ "fifteen",
+ "fify",
+ "fill",
+ "find",
+ "fire",
+ "first",
+ "five",
+ "for",
+ "former",
+ "formerly",
+ "forty",
+ "found",
+ "four",
+ "from",
+ "front",
+ "full",
+ "further",
+ "get",
+ "give",
+ "go",
+ "had",
+ "has",
+ "hasnt",
+ "have",
+ "he",
+ "hence",
+ "her",
+ "here",
+ "hereafter",
+ "hereby",
+ "herein",
+ "hereupon",
+ "hers",
+ "herself",
+ "him",
+ "himself",
+ "his",
+ "how",
+ "however",
+ "hundred",
+ "i",
+ "ie",
+ "if",
+ "in",
+ "inc",
+ "indeed",
+ "interest",
+ "into",
+ "is",
+ "it",
+ "its",
+ "itself",
+ "keep",
+ "last",
+ "latter",
+ "latterly",
+ "least",
+ "less",
+ "ltd",
+ "made",
+ "many",
+ "may",
+ "me",
+ "meanwhile",
+ "might",
+ "mill",
+ "mine",
+ "more",
+ "moreover",
+ "most",
+ "mostly",
+ "move",
+ "much",
+ "must",
+ "my",
+ "myself",
+ "name",
+ "namely",
+ "neither",
+ "never",
+ "nevertheless",
+ "next",
+ "nine",
+ "no",
+ "nobody",
+ "none",
+ "noone",
+ "nor",
+ "not",
+ "nothing",
+ "now",
+ "nowhere",
+ "of",
+ "off",
+ "often",
+ "oh",
+ "on",
+ "once",
+ "one",
+ "only",
+ "onto",
+ "or",
+ "other",
+ "others",
+ "otherwise",
+ "our",
+ "ours",
+ "ourselves",
+ "out",
+ "over",
+ "own",
+ "part",
+ "per",
+ "perhaps",
+ "please",
+ "put",
+ "rather",
+ "re",
+ "same",
+ "see",
+ "seem",
+ "seemed",
+ "seeming",
+ "seems",
+ "serious",
+ "several",
+ "she",
+ "should",
+ "show",
+ "side",
+ "since",
+ "sincere",
+ "six",
+ "sixty",
+ "so",
+ "some",
+ "somehow",
+ "someone",
+ "something",
+ "sometime",
+ "sometimes",
+ "somewhere",
+ "still",
+ "such",
+ "system",
+ "take",
+ "ten",
+ "than",
+ "that",
+ "the",
+ "their",
+ "them",
+ "themselves",
+ "then",
+ "thence",
+ "there",
+ "thereafter",
+ "thereby",
+ "therefore",
+ "therein",
+ "thereupon",
+ "these",
+ "they",
+ "thickv",
+ "thin",
+ "third",
+ "this",
+ "those",
+ "though",
+ "three",
+ "through",
+ "throughout",
+ "thru",
+ "thus",
+ "to",
+ "together",
+ "too",
+ "top",
+ "toward",
+ "towards",
+ "twelve",
+ "twenty",
+ "two",
+ "un",
+ "under",
+ "until",
+ "up",
+ "upon",
+ "us",
+ "very",
+ "via",
+ "was",
+ "we",
+ "well",
+ "were",
+ "what",
+ "whatever",
+ "when",
+ "whence",
+ "whenever",
+ "where",
+ "whereafter",
+ "whereas",
+ "whereby",
+ "wherein",
+ "whereupon",
+ "wherever",
+ "whether",
+ "which",
+ "while",
+ "whither",
+ "who",
+ "whoever",
+ "whole",
+ "whom",
+ "whose",
+ "why",
+ "will",
+ "with",
+ "within",
+ "without",
+ "would",
+ "yes",
+ "yet",
+ "you",
+ "your",
+ "yours",
+ "yourself",
+ "yourselves",
+ }
+}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/doc.go b/vendor/github.com/DavidBelicza/TextRank/v2/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..51c8cc6ca24502abec9116b0a47926817b6285b7
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/doc.go
@@ -0,0 +1,445 @@
+/*
+Package textrank is an implementation of Text Rank algorithm in Go with
+extendable features (automatic summarization, phrase extraction). It supports
+multithreading by goroutines. The package is under The MIT Licence.
+
+MOTIVATION
+
+If there were a program that could rank a book-sized text's words, phrases and
+sentences continuously on multiple threads, and it were open to modification
+through objects, written in a simple, secure, static language, and very well
+documented... Now, here it is.
+
+FEATURES
+
+- Find the most important phrases.
+- Find the most important words.
+- Find the most important N sentences.
+- Importance by phrase weights.
+- Importance by word occurrence.
+- Find the first N sentences, start from Xth sentence.
+- Find sentences by phrase chains ordered by position in text.
+- Access to the whole ranked data.
+- Support more languages.
+- Algorithm for weighting can be modified by interface implementation.
+- Parser can be modified by interface implementation.
+- Multi thread support.
+
+EXAMPLES
+
+Find the most important phrases:
+
+This is the most basic and simplest usage of textrank.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+ }
+
+All possible pre-defined finder queries:
+
+After ranking, the graph contains a lot of valuable data. There are functions in
+textrank package what contains logic to retrieve those data from the graph.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases order by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+
+ // Get all words order by weight.
+ words := textrank.FindSingleWords(tr)
+ // Most important word.
+ fmt.Println(words[0])
+
+ // Get the most important 10 sentences. Importance by phrase weights.
+ sentences := textrank.FindSentencesByRelationWeight(tr, 10)
+ // Found sentences
+ fmt.Println(sentences)
+
+ // Get the most important 10 sentences. Importance by word occurrence.
+ sentences = textrank.FindSentencesByWordQtyWeight(tr, 10)
+ // Found sentences
+ fmt.Println(sentences)
+
+ // Get the first 10 sentences, start from 5th sentence.
+ sentences = textrank.FindSentencesFrom(tr, 5, 10)
+ // Found sentences
+ fmt.Println(sentences)
+
+ // Get sentences by phrase/word chains order by position in text.
+ sentencesPh := textrank.FindSentencesByPhraseChain(tr, []string{"gnome", "shell", "extension"})
+ // Found sentence.
+ fmt.Println(sentencesPh[0])
+ }
+
+Access to everything
+
+After ranking, the graph contains a lot of valuable data. The GetRank function
+allows access to the graph and every data can be retrieved from this structure.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get the rank graph.
+ rankData := tr.GetRankData()
+
+ // Get word ID by token/word.
+ wordId := rankData.WordValID["gnome"]
+
+ // Word's weight.
+ fmt.Println(rankData.Words[wordId].Weight)
+ // Word's quantity/occurrence.
+ fmt.Println(rankData.Words[wordId].Qty)
+ // All sentences what contain the this word.
+ fmt.Println(rankData.Words[wordId].SentenceIDs)
+ // All other words what are related to this word on left side.
+ fmt.Println(rankData.Words[wordId].ConnectionLeft)
+ // All other words what are related to this word on right side.
+ fmt.Println(rankData.Words[wordId].ConnectionRight)
+ // The node of this word, it contains the related words and the
+ // relation weight.
+ fmt.Println(rankData.Relation.Node[wordId])
+ }
+
+Adding text continuously:
+
+It is possible to add more text after other texts have already been added. The
+Ranking function can merge these multiple texts and it can recalculate the
+weights and all related data.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ rawText2 := "Another book or article..."
+ rawText3 := "Third another book or article..."
+
+ // Add text to the previously added text.
+ tr.Populate(rawText2, language, rule)
+ // Add text to the previously added text.
+ tr.Populate(rawText3, language, rule)
+ // Run the ranking to the whole composed text.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+ }
+
+Using different algorithm to ranking text:
+
+Two algorithms have been implemented. It is possible to write a custom
+algorithm through the Algorithm interface and use it instead of the defaults.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Using a little bit more complex algorithm to ranking text.
+ algorithmChain := textrank.NewChainAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmChain)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+ }
+
+Using multiple graphs:
+
+Graph ID exists because it is possible run multiple independent text ranking
+processes.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // 1th TextRank object
+ tr1 := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr1.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr1.Ranking(algorithmDef)
+
+ // 2nd TextRank object
+ tr2 := textrank.NewTextRank()
+
+ // Using a little bit more complex algorithm to ranking text.
+ algorithmChain := textrank.NewChainAlgorithm()
+
+ // Add text to the second graph.
+ tr2.Populate(rawText, language, rule)
+ // Run the ranking on the second graph.
+ tr2.Ranking(algorithmChain)
+
+ // Get all phrases by weight from first graph.
+ rankedPhrases := textrank.FindPhrases(tr1)
+
+ // Most important phrase from first graph.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase from first graph.
+ fmt.Println(rankedPhrases[1])
+
+ // Get all phrases by weight from second graph.
+ rankedPhrases2 := textrank.FindPhrases(tr2)
+
+ // Most important phrase from second graph.
+ fmt.Println(rankedPhrases2[0])
+ // Second important phrase from second graph.
+ fmt.Println(rankedPhrases2[1])
+ }
+
+Using different non-English languages:
+
+English is used by default but it is possible to add any language. To use other
+languages a stop word list is required, which you can find here:
+https://github.com/stopwords-iso
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ rawText := "Your long raw text, it could be a book. Lorem ipsum..."
+ // TextRank object
+ tr := textrank.NewTextRank()
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+
+ // Add Spanish stop words (just some example).
+ language.SetWords("es", []string{"uno", "dos", "tres", "yo", "es", "eres"})
+ // Active the Spanish.
+ language.SetActiveLanguage("es")
+
+ // Default algorithm for ranking text.
+ algorithmDef := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithmDef)
+
+ // Get all phrases by weight.
+ rankedPhrases := textrank.FindPhrases(tr)
+
+ // Most important phrase.
+ fmt.Println(rankedPhrases[0])
+ // Second important phrase.
+ fmt.Println(rankedPhrases[1])
+ }
+
+Asynchronous usage by goroutines:
+
+It is thread safe. Independent graphs can receive texts in the same time and can
+be extended by more text also in the same time.
+
+ package main
+
+ import (
+ "fmt"
+ "time"
+
+ "github.com/DavidBelicza/TextRank"
+ )
+
+ func main() {
+ // A flag when program has to stop.
+ stopProgram := false
+ // Channel.
+ stream := make(chan string)
+ // TextRank object.
+ tr := textrank.NewTextRank()
+
+ // Open new thread/routine
+ go func(tr *textrank.TextRank) {
+ // 3 texts.
+ rawTexts := []string{
+ "Very long text...",
+ "Another very long text...",
+ "Second another very long text...",
+ }
+
+ // Add 3 texts to the stream channel, one by one.
+ for _, rawText := range rawTexts {
+ stream <- rawText
+ }
+ }(tr)
+
+ // Open new thread/routine
+ go func() {
+ // Counter how many times texts added to the ranking.
+ i := 1
+
+ for {
+ // Get text from stream channel when it got a new one.
+ rawText := <-stream
+
+ // Default Rule for parsing.
+ rule := textrank.NewDefaultRule()
+ // Default Language for filtering stop words.
+ language := textrank.NewDefaultLanguage()
+ // Default algorithm for ranking text.
+ algorithm := textrank.NewDefaultAlgorithm()
+
+ // Add text.
+ tr.Populate(rawText, language, rule)
+ // Run the ranking.
+ tr.Ranking(algorithm)
+
+ // Set stopProgram flag to true when all 3 text have been added.
+ if i == 3 {
+ stopProgram = true
+ }
+
+ i++
+ }
+ }()
+
+ // The main thread has to run while go-routines run. When stopProgram is
+ // true then the loop has finish.
+ for !stopProgram {
+ time.Sleep(time.Second * 1)
+ }
+
+ // Most important phrase.
+ phrases := textrank.FindPhrases(tr)
+ // Second important phrase.
+ fmt.Println(phrases[0])
+ }
+*/
+package textrank
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/install.example.sh b/vendor/github.com/DavidBelicza/TextRank/v2/install.example.sh
new file mode 100644
index 0000000000000000000000000000000000000000..84e2d1bb09ae1bce511ca78c696cc93626a52266
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/install.example.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+go mod vendor
+go test ./...
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/parse/rule.go b/vendor/github.com/DavidBelicza/TextRank/v2/parse/rule.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f6ec91418307ad1e59d4c149abc3f1f5d83f1c3
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/parse/rule.go
@@ -0,0 +1,52 @@
+package parse
+
// Rule interface and its methods make possible the polymorphic usage of the
// process that retrieves tokens from text: implementations decide which
// characters split words and which split sentences.
type Rule interface {
	IsWordSeparator(chr rune) bool
	IsSentenceSeparator(chr rune) bool
}

// RuleDefault struct implements the Rule interface. It contains the separator
// characters and can decide whether a character is a separator or not.
type RuleDefault struct {
	wordSeparators     []rune
	sentenceSeparators []rune
}

// NewRule constructor retrieves a RuleDefault pointer.
func NewRule() *RuleDefault {
	return &RuleDefault{
		// Separators are stored as runes so membership checks need no
		// per-call string conversion. The original list contained '"' twice;
		// it is listed once here, which does not change behavior.
		wordSeparators:     []rune{' ', ',', '\'', '’', '"', ')', '(', '[', ']', '{', '}', ';', '\n', '>', '<', '%', '@', '&', '=', '#'},
		sentenceSeparators: []rune{'!', '.', '?'},
	}
}

// IsWordSeparator method retrieves true when a character is a kind of special
// character that possibly separates two words from each other. Sentence
// separators also count as word separators, checked through
// IsSentenceSeparator.
func (r *RuleDefault) IsWordSeparator(chr rune) bool {
	for _, sep := range r.wordSeparators {
		if chr == sep {
			return true
		}
	}

	return r.IsSentenceSeparator(chr)
}

// IsSentenceSeparator method retrieves true when a character is a kind of
// special character that possibly separates two sentences from each other.
func (r *RuleDefault) IsSentenceSeparator(chr rune) bool {
	for _, sep := range r.sentenceSeparators {
		if chr == sep {
			return true
		}
	}

	return false
}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/parse/text.go b/vendor/github.com/DavidBelicza/TextRank/v2/parse/text.go
new file mode 100644
index 0000000000000000000000000000000000000000..aab27c36cbb0d890040df82edc3912b24da20117
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/parse/text.go
@@ -0,0 +1,44 @@
+package parse
+
// Text struct accumulates the sentences parsed out of a raw text.
type Text struct {
	parsedSentences []ParsedSentence
}

// ParsedSentence struct pairs an original raw sentence with its tokenized
// words.
type ParsedSentence struct {
	original string
	words    []string
}

// Append method stores a raw sentence together with its words in the Text
// object. Sentences without any word are ignored.
func (text *Text) Append(rawSentence string, words []string) {
	if len(words) == 0 {
		return
	}

	text.parsedSentences = append(text.parsedSentences, ParsedSentence{
		original: rawSentence,
		words:    words,
	})
}

// GetSentences method returns every parsed sentence in insertion order.
func (text *Text) GetSentences() []ParsedSentence {
	return text.parsedSentences
}

// GetWords method returns the tokenized words of the sentence.
func (parsedSentence *ParsedSentence) GetWords() []string {
	return parsedSentence.words
}

// GetOriginal method returns the sentence in its original raw form.
func (parsedSentence *ParsedSentence) GetOriginal() string {
	return parsedSentence.original
}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/parse/tokenizer.go b/vendor/github.com/DavidBelicza/TextRank/v2/parse/tokenizer.go
new file mode 100644
index 0000000000000000000000000000000000000000..003460ed4ad3e70ae5389d6305200ffdfceab331
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/parse/tokenizer.go
@@ -0,0 +1,63 @@
+package parse
+
+import (
+ "strings"
+)
+
// TokenizeText function uses the given raw text, parses it by a Rule object
// and retrieves the parsed text in a Text struct object.
//
// Sentences keep their trailing separator character; words are lowercased and
// separator characters are stripped from them.
func TokenizeText(rawText string, rule Rule) Text {
	return findSentences(rawText, rule)
}

// findSentences splits rawText into sentences. It walks the text rune by rune
// while tracking BYTE offsets: i is the byte offset where the current
// sentence starts, and j (the rune's start offset yielded by range) is
// advanced past the rune by adding its encoded length. A sentence is cut when
// a sentence separator is seen or when the end of the text is reached, so the
// separator character itself stays inside the sentence slice.
func findSentences(rawText string, rule Rule) Text {
	text := Text{}

	var sentence string
	var i int
	slen := len(rawText)

	for j, chr := range rawText {
		// j now points just past the current rune in bytes.
		j += len(string(chr))
		//when separator or the last
		if rule.IsSentenceSeparator(chr) || j == slen {
			sentence = rawText[i:j]
			if len(sentence) > 0 {
				text.Append(sentence, findWords(sentence, rule))
			}

			sentence = ""
			i = j
		}
	}

	return text
}

// findWords splits one raw sentence into lowercased words using the same
// byte-offset walk as findSentences. When the cut happens at a separator the
// separator's bytes are excluded from the word (j-chrlen); when it happens at
// the end of the sentence without a separator, the final rune is kept.
func findWords(rawSentence string, rule Rule) (words []string) {
	words = []string{}

	var word string
	var i int
	slen := len(rawSentence)

	for j, chr := range rawSentence {
		chrlen := len(string(chr))
		// j now points just past the current rune in bytes.
		j += chrlen
		//when separator or the last
		if sep := rule.IsWordSeparator(chr); sep || j == slen {
			if sep {
				// Drop the separator itself from the word.
				word = rawSentence[i : j-chrlen]
			} else {
				word = rawSentence[i:j]
			}
			if len(word) > 0 {
				words = append(words, strings.ToLower(word))
			}
			word = ""
			i = j
		}
	}

	return
}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/rank/algorithm.go b/vendor/github.com/DavidBelicza/TextRank/v2/rank/algorithm.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f9345ffa888ab621fe7466bc8a20b15879eafb0
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/rank/algorithm.go
@@ -0,0 +1,99 @@
+package rank
+
+import (
+ "math"
+)
+
+// Algorithm interface and its methods make possible the polimorf usage of
+// weighting process.
+type Algorithm interface {
+ WeightingRelation(
+ word1ID int,
+ word2ID int,
+ rank *Rank,
+ ) float32
+
+ WeightingHits(
+ wordID int,
+ rank *Rank,
+ ) float32
+}
+
+// AlgorithmDefault struct is the basic implementation of Algorithm. It can
+// weight a word or phrase by comparing them.
+type AlgorithmDefault struct{}
+
+// NewAlgorithmDefault constructor retrieves an AlgorithmDefault pointer.
+func NewAlgorithmDefault() *AlgorithmDefault {
+ return &AlgorithmDefault{}
+}
+
+// WeightingRelation method is the traditional algorithm of text rank to
+// weighting a phrase.
+func (a *AlgorithmDefault) WeightingRelation(
+ word1ID int,
+ word2ID int,
+ rank *Rank,
+) float32 {
+ relationQty := rank.Relation.Node[word1ID][word2ID].Qty
+
+ return float32(relationQty)
+}
+
+// WeightingHits method ranks the words by their occurrence.
+func (a *AlgorithmDefault) WeightingHits(
+ wordID int,
+ rank *Rank,
+) float32 {
+ weight := rank.Words[wordID].Qty
+
+ return float32(weight)
+}
+
+// AlgorithmChain struct is the combined implementation of Algorithm. It is a
+// good example how weighting can be changed by a different implementations. It
+// can weight a word or phrase by comparing them.
+type AlgorithmChain struct{}
+
+// NewAlgorithmChain constructor retrieves an AlgorithmChain pointer.
+func NewAlgorithmChain() *AlgorithmChain {
+ return &AlgorithmChain{}
+}
+
+// WeightingRelation method is a combined algorithm of text rank and word
+// occurrence, it weights a phrase.
+func (a *AlgorithmChain) WeightingRelation(
+ word1ID int,
+ word2ID int,
+ rank *Rank,
+) float32 {
+ relationQty := rank.Relation.Node[word1ID][word2ID].Qty
+ word1Qty := rank.Words[word1ID].Qty
+ word2Qty := rank.Words[word2ID].Qty
+
+ qDiff := float32(math.Abs(float64(word1Qty)-float64(word2Qty))) / 100
+ weight := float32(relationQty) + qDiff
+
+ return weight
+}
+
+// WeightingHits method ranks the words by their occurrence.
+func (a *AlgorithmChain) WeightingHits(
+ wordID int,
+ rank *Rank,
+) float32 {
+ word := rank.Words[wordID]
+ qty := 0
+
+ for leftWordID, leftWordQty := range word.ConnectionLeft {
+ qty += rank.Words[leftWordID].Qty * leftWordQty
+ }
+
+ for rightWordID, rightWordQty := range word.ConnectionRight {
+ qty += rank.Words[rightWordID].Qty * rightWordQty
+ }
+
+ weight := float32(word.Qty) + (float32(qty))
+
+ return float32(weight)
+}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/rank/rank.go b/vendor/github.com/DavidBelicza/TextRank/v2/rank/rank.go
new file mode 100644
index 0000000000000000000000000000000000000000..3bcef7c4cd222b00329e1b6c607b4f03b9a878bb
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/rank/rank.go
@@ -0,0 +1,147 @@
+package rank
+
+// Rank struct contains every original raw sentences, words, tokens, phrases,
+// indexes, word hits, phrase hits and minimum-maximum values.
+//
+// Max is the occurrence of the most used word.
+//
+// Min is the occurrence of the less used word. It is always greater then 0.
+//
+// Relation is the Relation object, contains phrases.
+//
+// SentenceMap contains raw sentences. Index is the sentence ID, value is the
+// sentence itself.
+//
+// Words contains Word objects. Index is the word ID, value is the word/token
+// itself.
+//
+// WordValID contains words. Index is the word/token, value is the ID.
+type Rank struct {
+ Max float32
+ Min float32
+ Relation Relation
+ SentenceMap map[int]string
+ Words map[int]*Word
+ WordValID map[string]int
+}
+
+// Word struct contains all data about the words.
+//
+// If a word is multiple times in the text then the multiple words point to the
+// same ID. So Word is unique.
+//
+// SentenceIDs contains all IDs of sentences what contain the word.
+//
+// ConnectionLeft contains all words what are connected to this word on the left
+// side. The map index is the ID of the related word and its value is the
+// occurrence.
+//
+// ConnectionRight contains all words what are connected to this word on the
+// right side. The map index is the ID of the related word and its value is the
+// occurrence.
+//
+// Token is the word itself, but not the original, it is tokenized.
+//
+// Qty is the number of occurrence of the word.
+//
+// Weight is the weight of the word between 0.00 and 1.00.
+type Word struct {
+ ID int
+ SentenceIDs []int
+ ConnectionLeft map[int]int
+ ConnectionRight map[int]int
+ Token string
+ Qty int
+ Weight float32
+}
+
+// NewRank constructor retrieves a Rank pointer.
+func NewRank() *Rank {
+ return &Rank{
+ 0,
+ 0,
+ Relation{
+ 0,
+ 0,
+ make(map[int]map[int]Score),
+ },
+ make(map[int]string),
+ make(map[int]*Word),
+ make(map[string]int),
+ }
+}
+
+// IsWordExist method retrieves true when the given word is already in the rank.
+func (rank *Rank) IsWordExist(word string) bool {
+ _, find := rank.WordValID[word]
+
+ return find
+}
+
+// AddNewWord method adds a new word to the rank object and it defines its ID.
+func (rank *Rank) AddNewWord(word string, prevWordIdx int, sentenceID int) (wordID int) {
+ wordID = len(rank.Words)
+ connectionLeft := make(map[int]int)
+
+ if prevWordIdx >= 0 {
+ connectionLeft[prevWordIdx] = 1
+ }
+
+ newWord := &Word{
+ ID: wordID,
+ SentenceIDs: []int{sentenceID},
+ ConnectionLeft: connectionLeft,
+ ConnectionRight: make(map[int]int),
+ Token: word,
+ Qty: 1,
+ Weight: 0,
+ }
+
+ rank.Words[wordID] = newWord
+ rank.WordValID[word] = wordID
+
+ return
+}
+
+// UpdateWord method update a word what already exists in the rank object. It
+// retrieves its ID.
+func (rank *Rank) UpdateWord(word string, prevWordIdx int, sentenceID int) (wordID int) {
+ wordID = rank.WordValID[word]
+
+ found := false
+
+ for _, oldSentenceID := range rank.Words[wordID].SentenceIDs {
+ if sentenceID == oldSentenceID {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ rank.Words[wordID].SentenceIDs = append(
+ rank.Words[wordID].SentenceIDs,
+ sentenceID,
+ )
+ }
+
+ rank.Words[wordID].Qty++
+
+ if prevWordIdx >= 0 {
+ rank.Words[wordID].ConnectionLeft[prevWordIdx]++
+ }
+
+ return
+}
+
+// UpdateRightConnection method adds the right connection to the word. It always
+// can be used after a word has added and the next word is known.
+func (rank *Rank) UpdateRightConnection(wordID int, rightWordID int) {
+ if wordID >= 0 {
+ rank.Words[wordID].ConnectionRight[rightWordID]++
+ }
+}
+
+// GetWordData method retrieves all words as a pointer.
+func (rank *Rank) GetWordData() map[int]*Word {
+ return rank.Words
+}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/rank/ranking.go b/vendor/github.com/DavidBelicza/TextRank/v2/rank/ranking.go
new file mode 100644
index 0000000000000000000000000000000000000000..5fd2dfa4c592eb432d080aed3c2005b332a31ccb
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/rank/ranking.go
@@ -0,0 +1,66 @@
+package rank
+
// Calculate function ranks the words by the given Algorithm implementation
// and writes the results back into ranks.
func Calculate(ranks *Rank, algorithm Algorithm) {
	updateRanks(ranks, algorithm)
}

// updateRanks runs the weighting in four passes:
//  1. weight every word and track the global word Min/Max,
//  2. normalize the word weights into [0, 1],
//  3. weight every word-to-word relation and track the relation Min/Max,
//  4. normalize the relation weights into [0, 1].
func updateRanks(ranks *Rank, algorithm Algorithm) {
	for _, word := range ranks.Words {
		weight := algorithm.WeightingHits(word.ID, ranks)
		word.Weight = weight

		if ranks.Max < word.Weight {
			ranks.Max = word.Weight
		}

		// Min == 0 marks "not set yet", so the first weight always replaces
		// it.
		if ranks.Min > word.Weight || ranks.Min == 0 {
			ranks.Min = word.Weight
		}
	}

	for _, word := range ranks.Words {
		word.Weight = normalize(word.Weight, ranks.Min, ranks.Max)
	}

	for x, xMap := range ranks.Relation.Node {
		for y := range xMap {
			sentenceIDs := ranks.Relation.Node[x][y].SentenceIDs
			weight := algorithm.WeightingRelation(x, y, ranks)

			// Map values are not addressable, so a whole new Score is written
			// back with the fresh weight.
			ranks.Relation.Node[x][y] = Score{
				ranks.Relation.Node[x][y].Qty,
				weight,
				sentenceIDs,
			}

			if ranks.Relation.Max < weight {
				ranks.Relation.Max = weight
			}

			if ranks.Relation.Min > weight || ranks.Relation.Min == 0 {
				ranks.Relation.Min = weight
			}
		}
	}

	for x, xMap := range ranks.Relation.Node {
		for y := range xMap {
			weight := normalize(
				ranks.Relation.Node[x][y].Weight,
				ranks.Relation.Min,
				ranks.Relation.Max,
			)

			ranks.Relation.Node[x][y] = Score{
				ranks.Relation.Node[x][y].Qty,
				weight,
				ranks.Relation.Node[x][y].SentenceIDs,
			}
		}
	}
}
+
// normalize maps weight linearly onto [0, 1] given the observed min and max.
// When max == min every weight is identical, so 1 is returned for all of them
// instead of the undefined 0/0 the naive formula would produce.
func normalize(weight float32, min float32, max float32) float32 {
	if max == min {
		return 1
	}

	return (weight - min) / (max - min)
}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/rank/relation.go b/vendor/github.com/DavidBelicza/TextRank/v2/rank/relation.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb8b97ed7efb9154868ab32715cfb15dfed0596a
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/rank/relation.go
@@ -0,0 +1,77 @@
+package rank
+
// Relation struct contains the phrase data.
//
// Max is the occurrence of the most used phrase.
//
// Min is the occurrence of the least used phrase. It is always greater than
// 0.
//
// Node contains the Scores. The first ID is word 1, the second ID is word 2
// and the value is the Score describing their relation.
type Relation struct {
	Max  float32
	Min  float32
	Node map[int]map[int]Score
}

// Score struct contains data about the relation of two words.
//
// Qty is the occurrence of the phrase.
//
// Weight is the weight of the phrase between 0.00 and 1.00.
//
// SentenceIDs contains the IDs of every sentence that contains the phrase.
type Score struct {
	Qty         int
	Weight      float32
	SentenceIDs []int
}

// AddRelation method registers a co-occurrence of two words. A relation is
// stored in one orientation only, so an existing entry is searched in both
// directions before anything new is created. A relatedWordID of -1 means "no
// neighbor" and is ignored.
func (relation *Relation) AddRelation(wordID int, relatedWordID int, sentenceID int) {
	if relatedWordID == -1 {
		return
	}

	if relation.updateRelation(relatedWordID, wordID, true, sentenceID) {
		return
	}

	if relation.extendRelation(wordID, relatedWordID, true, sentenceID) {
		return
	}

	relation.createRelation(wordID, relatedWordID, sentenceID)
}

// updateRelation increments an already existing (x, y) score. When r is true
// and (x, y) does not exist, the mirrored (y, x) orientation is tried once.
func (relation *Relation) updateRelation(x int, y int, r bool, sentenceID int) bool {
	score, ok := relation.Node[x][y]
	if ok {
		relation.Node[x][y] = Score{
			Qty:         score.Qty + 1,
			Weight:      score.Weight,
			SentenceIDs: append(score.SentenceIDs, sentenceID),
		}

		return true
	}

	if r {
		return relation.updateRelation(y, x, false, sentenceID)
	}

	return false
}

// extendRelation adds a new (x, y) score to an already existing x row. When r
// is true and x has no row, the mirrored orientation is tried once.
func (relation *Relation) extendRelation(x int, y int, r bool, sentenceID int) bool {
	if _, ok := relation.Node[x]; ok {
		relation.Node[x][y] = Score{Qty: 1, Weight: 0, SentenceIDs: []int{sentenceID}}

		return true
	}

	if r {
		return relation.extendRelation(y, x, false, sentenceID)
	}

	return false
}

// createRelation starts a brand new row for x holding the (x, y) score.
func (relation *Relation) createRelation(x int, y int, sentenceID int) {
	relation.Node[x] = map[int]Score{
		y: {Qty: 1, Weight: 0, SentenceIDs: []int{sentenceID}},
	}
}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/rank/sorting.go b/vendor/github.com/DavidBelicza/TextRank/v2/rank/sorting.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d00a97b527be24980b209e9b7d55317fe21a658
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/rank/sorting.go
@@ -0,0 +1,202 @@
+package rank
+
+import (
+ "sort"
+)
+
+// Phrase struct contains a single phrase and its data.
+//
+// LeftID is the ID of the word 1.
+//
+// RightID is the ID of the word 2.
+//
+// Left is the token of the word 1.
+//
+// Right is the token of the word 2.
+//
+// Weight is between 0.00 and 1.00.
+//
+// Qty is the occurrence of the phrase.
+type Phrase struct {
+ LeftID int
+ RightID int
+ Left string
+ Right string
+ Weight float32
+ Qty int
+}
+
+// FindPhrases function has wrapper textrank.FindPhrases. Use the wrapper
+// instead.
+func FindPhrases(ranks *Rank) []Phrase {
+ var phrases []Phrase
+
+ for x, xMap := range ranks.Relation.Node {
+ for y := range xMap {
+ phrases = append(phrases, Phrase{
+ ranks.Words[x].ID,
+ ranks.Words[y].ID,
+ ranks.Words[x].Token,
+ ranks.Words[y].Token,
+ ranks.Relation.Node[x][y].Weight,
+ ranks.Relation.Node[x][y].Qty,
+ })
+ }
+ }
+
+ sort.Slice(phrases, func(i, j int) bool {
+ return phrases[i].Weight > phrases[j].Weight
+ })
+
+ return phrases
+}
+
+// SingleWord struct contains a single word and its data.
+//
+// ID of the word.
+//
+// Word itself, the token.
+//
+// Weight of the word between 0.00 and 1.00.
+//
+// Quantity of the word.
+type SingleWord struct {
+ ID int
+ Word string
+ Weight float32
+ Qty int
+}
+
+// FindSingleWords function has wrapper textrank.FindSingleWords. Use the
+// wrapper instead.
+func FindSingleWords(ranks *Rank) []SingleWord {
+ var singleWords []SingleWord
+
+ for _, word := range ranks.Words {
+ singleWords = append(singleWords, SingleWord{
+ word.ID,
+ word.Token,
+ word.Weight,
+ word.Qty,
+ })
+ }
+
+ sort.Slice(singleWords, func(i, j int) bool {
+ return singleWords[i].Weight > singleWords[j].Weight
+ })
+
+ return singleWords
+}
+
+// Sentence struct contains a single sentence and its data.
+type Sentence struct {
+ ID int
+ Value string
+}
+
+// ByQty filter by occurrence of word.
+const ByQty = 0
+
+// ByRelation filter by phrase weight.
+const ByRelation = 1
+
+// FindSentences function has wrappers textrank.FindSentencesByRelationWeight
+// and textrank.FindSentencesByWordQtyWeight. Use the wrappers instead.
+func FindSentences(ranks *Rank, kind int, limit int) []Sentence {
+ var sentences []Sentence
+
+ cache := make(map[int]bool)
+
+ collect := func(sentenceIDs []int) bool {
+ for _, id := range sentenceIDs {
+ if len(sentences) >= limit {
+ return true
+ }
+
+ if !cache[id] {
+ sentences = append(sentences, Sentence{id, ranks.SentenceMap[id]})
+ cache[id] = true
+ }
+ }
+
+ return false
+ }
+
+ if kind == ByQty {
+ singleWords := FindSingleWords(ranks)
+
+ for _, singleWord := range singleWords {
+ sentenceIDs := ranks.Words[singleWord.ID].SentenceIDs
+
+ if collect(sentenceIDs) {
+ return sentences
+ }
+ }
+ } else if kind == ByRelation {
+ phrases := FindPhrases(ranks)
+
+ for _, phrase := range phrases {
+ sentenceIDs := ranks.Relation.Node[phrase.LeftID][phrase.RightID].SentenceIDs
+
+ if collect(sentenceIDs) {
+ return sentences
+ }
+ }
+ }
+
+ return sentences
+}
+
// FindSentencesByPhrases function has wrapper
// textrank.FindSentencesByPhraseChain. Use the wrapper instead.
//
// It collects the sentences in which the given words appear chained together.
// Every ordered pair of the given words is probed in the relation graph; each
// hit votes for the sentence IDs stored on that relation. A sentence is
// returned once it collected at least len(words)-1 votes (one per adjacent
// pair of the chain). NOTE(review): a word is also paired with itself and
// both orders of each pair are probed — presumably harmless because a
// relation is stored in a single orientation; confirm against
// rank.Relation.AddRelation. Also note that words missing from WordValID map
// to ID 0 — verify callers only pass known words.
func FindSentencesByPhrases(ranks *Rank, words []string) []Sentence {
	var sentences []Sentence

	// Minimum number of matching relations a sentence needs to qualify.
	reqMatch := len(words) - 1
	// sentence ID -> number of matching relations found so far.
	sentenceIDs := make(map[int]int)

	for _, i := range words {
		for _, j := range words {
			x := ranks.WordValID[i]
			y := ranks.WordValID[j]

			if _, ok := ranks.Relation.Node[x][y]; ok {
				curSentenceIDs := ranks.Relation.Node[x][y].SentenceIDs

				for _, id := range curSentenceIDs {
					if _, ok := sentenceIDs[id]; ok {
						sentenceIDs[id]++
					} else {
						sentenceIDs[id] = 1
					}
				}
			}
		}
	}

	for sentenceID, v := range sentenceIDs {
		if v >= reqMatch {
			sentences = append(sentences, Sentence{sentenceID, ranks.SentenceMap[sentenceID]})
		}
	}

	// Deterministic output: order by sentence position in the text.
	sort.Slice(sentences, func(i, j int) bool {
		return sentences[i].ID < sentences[j].ID
	})

	return sentences
}
+
+// FindSentencesFrom function has wrapper textrank.FindSentencesFrom. Use the
+// wrapper instead.
+func FindSentencesFrom(ranks *Rank, id int, limit int) []Sentence {
+ var sentences []Sentence
+
+ limit = id + limit - 1
+
+ for i := id; i <= limit; i++ {
+ sentences = append(sentences, Sentence{i, ranks.SentenceMap[i]})
+ }
+
+ return sentences
+}
diff --git a/vendor/github.com/DavidBelicza/TextRank/v2/textrank.go b/vendor/github.com/DavidBelicza/TextRank/v2/textrank.go
new file mode 100644
index 0000000000000000000000000000000000000000..ed48ce3ff8bb048556b521a057eae6fe2695e908
--- /dev/null
+++ b/vendor/github.com/DavidBelicza/TextRank/v2/textrank.go
@@ -0,0 +1,194 @@
+package textrank
+
+import (
+ "github.com/DavidBelicza/TextRank/v2/convert"
+ "github.com/DavidBelicza/TextRank/v2/parse"
+ "github.com/DavidBelicza/TextRank/v2/rank"
+)
+
+// TextRank structure contains the Rank data object. This structure is a wrapper
+// around the whole text ranking functionality.
+type TextRank struct {
+ rank *rank.Rank
+}
+
+// NewTextRank constructor retrieves a TextRank pointer. This is the 1st step to
+// use TextRank.
+func NewTextRank() *TextRank {
+ return &TextRank{
+ rank.NewRank(),
+ }
+}
+
+// NewDefaultRule function retrieves a default Rule object that works in the
+// most cases in English or similar Latin languages like French or Spanish. The
+// Rule defines how raw text should be split into sentences and words. Because
+// Rule is an interface it's possible to modify the ranking by injecting a different
+// Rule implementation. This is the 2nd step to use TextRank.
+func NewDefaultRule() *parse.RuleDefault {
+ return parse.NewRule()
+}
+
+// NewDefaultLanguage function retrieves a default Language object. It defines
+// what words are real and what words are just Stop Words or useless Junk Words.
+// It uses the default English Stop Words, but it's possible to set different
+// Stop Words in English or any other languages. Because Language is an
+// interface it's possible to modify the ranking by injecting a different Language
+// implementation. This is the 3rd step to use TextRank.
+func NewDefaultLanguage() *convert.LanguageDefault {
+ return convert.NewLanguage()
+}
+
+// NewDefaultAlgorithm function retrieves an Algorithm object. It defines how
+// the text ranking algorithm should work: the weighting. This is the general
+// text rank by weighting the connection between the words to find the strongest
+// phrases. Because Algorithm is an interface it's possible to modify the
+// ranking algorithm by injecting a different implementation. This is the 4th step to
+// use TextRank.
+func NewDefaultAlgorithm() *rank.AlgorithmDefault {
+ return rank.NewAlgorithmDefault()
+}
+
+// NewChainAlgorithm function retrieves an Algorithm object. It defines how
+// the text ranking algorithm should work: the weighting. This is an alternative
+// way of ranking words by weighting the number of the words. Because Algorithm
+// is an interface it's possible to modify the ranking algorithm by injecting
+// a different implementation. This is the 4th step to use TextRank.
+func NewChainAlgorithm() *rank.AlgorithmChain {
+ return rank.NewAlgorithmChain()
+}
+
+// Populate method adds a raw text to the text-ranking graph. It parses,
+// tokenizes the raw text and prepares it for weighting and scoring. It's possible
+// to append a new raw text to an existing one even if the previous text is
+// already ranked. This is 5th step to use TextRank.
+//
+// text string must be a plain text from TXT or PDF or any document, it can
+// contain new lines, break lines or any unnecessary text parts, but it should
+// not contain HTML tags or codes.
+//
+// lang Language object can be loaded from NewDefaultLanguage function.
+//
+// rule Rule object can be loaded from NewDefaultRule function.
+func (textRank *TextRank) Populate(
+ text string,
+ lang convert.Language,
+ rule parse.Rule,
+) {
+ parsedText := parse.TokenizeText(text, rule)
+
+ for _, sentence := range parsedText.GetSentences() {
+ convert.TextToRank(sentence, lang, textRank.rank)
+ }
+}
+
+// Ranking method counts the words and connections between the words, then it
+// weights the numbers then normalize them in type float32 between 0.00 and
+// 1.00. This is the 6th step to use TextRank.
+//
+// algorithm Algorithm is the object of the weighting and scoring methods.
+func (textRank *TextRank) Ranking(algorithm rank.Algorithm) {
+ rank.Calculate(textRank.rank, algorithm)
+}
+
+// GetRankData method retrieves the Rank data for the case when the developer wants
+// access to the whole graph and sentences, words, weights and all of the data
+// to analyze it or just implement a new search logic or finder method.
+func (textRank *TextRank) GetRankData() *rank.Rank {
+ return textRank.rank
+}
+
+// FindPhrases function retrieves a slice of Phrase structures by TextRank
+// object. The return value contains the sorted phrases with IDs, words, weights
+// and quantities by weight from 1 to 0. Weight is calculated from quantities of
+// relation between two words. A single phrase is from two words - no less and
+// no more. (But it's possible to find a chain of phrases by
+// FindSentencesByPhraseChain function.)
+func FindPhrases(textRank *TextRank) []rank.Phrase {
+ return rank.FindPhrases(textRank.rank)
+}
+
+// FindSingleWords function retrieves a slice of SingleWord structures by
+// TextRank object. The return value contains the sorted words with IDs, words,
+// weights and quantities by weight from 1 to 0. Weight is calculated from
+// quantities of word.
+func FindSingleWords(textRank *TextRank) []rank.SingleWord {
+ return rank.FindSingleWords(textRank.rank)
+}
+
+// FindSentencesByRelationWeight function retrieves a slice of Sentence
+// structures by TextRank object. The return value contains the ID of the
+// sentence and the sentence text itself. The slice is sorted by weight of
+// phrases from 1 to 0.
+func FindSentencesByRelationWeight(
+ textRank *TextRank,
+ limit int,
+) []rank.Sentence {
+
+ return rank.FindSentences(textRank.rank, rank.ByRelation, limit)
+}
+
+// FindSentencesByWordQtyWeight function retrieves a slice of Sentence
+// structures by TextRank object. The return value contains the ID of the
+// sentence and the sentence text itself. The slice is sorted by weight of word
+// quantities from 1 to 0.
+func FindSentencesByWordQtyWeight(
+ textRank *TextRank,
+ limit int,
+) []rank.Sentence {
+
+ return rank.FindSentences(textRank.rank, rank.ByQty, limit)
+}
+
+// FindSentencesByPhraseChain function retrieves a slice of Sentence structures
+// by TextRank object and slice of phrases. The return value contains the ID of
+// the sentence and the sentence text itself. The slice is sorted by weight of
+// word quantities from 1 to 0.
+//
+// textRank TextRank is the object of the TextRank.
+//
+// phrases []string is a slice of phrases. A single phrase is from two words, so
+// when the slice contains 3 words the inner method will search for two phrases.
+// The search algorithm seeks for "len(phrases)!". In the case of three items the
+// possible combinations are 3 factorial (3!) = 3 * 2 * 1.
+//
+// rawText := "Long raw text, lorem ipsum..."
+// rule := NewDefaultRule()
+// language := NewDefaultLanguage()
+// algorithm := NewDefaultAlgorithm()
+//
+// Append(rawText, language, rule, 1)
+// Ranking(1, algorithm)
+//
+// FindSentencesByPhraseChain(1, []string{
+// "captain",
+// "james",
+// "kirk",
+// })
+//
+// The above code searches for captain james kirk, captain kirk james, james
+// kirk captain, james captain kirk, kirk james captain and kirk captain james
+// combinations in the graph. The 3 of words have to be related to each other
+// in the same sentence but the search algorithm ignores the stop words. So if
+// there is a sentence "James Kirk is the Captain of the Enterprise." the
+// sentence will be returned because the words "is" and "the" are stop words.
+func FindSentencesByPhraseChain(
+ textRank *TextRank,
+ phrases []string,
+) []rank.Sentence {
+
+ return rank.FindSentencesByPhrases(textRank.rank, phrases)
+}
+
+// FindSentencesFrom function retrieves a slice of Sentence structures by
+// TextRank object and by ID of the sentence. The return value contains the
+// sentence text itself. The returned slice contains sentences sorted by their
+// IDs started from the given sentence ID in ascending sort.
+func FindSentencesFrom(
+ textRank *TextRank,
+ sentenceID int,
+ limit int,
+) []rank.Sentence {
+
+ return rank.FindSentencesFrom(textRank.rank, sentenceID, limit)
+}
diff --git a/vendor/github.com/alecthomas/chroma/v2/.gitignore b/vendor/github.com/alecthomas/chroma/v2/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ccacd12e988116ed4a81001da5fad2da97490004
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/.gitignore
@@ -0,0 +1,19 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+/cmd/chroma/chroma
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+_models/
+
+_examples/
diff --git a/vendor/github.com/alecthomas/chroma/v2/.golangci.yml b/vendor/github.com/alecthomas/chroma/v2/.golangci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a06e95e8f7807a4bbc4a08cbacca1e80dafec97d
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/.golangci.yml
@@ -0,0 +1,78 @@
+run:
+ tests: true
+ skip-dirs:
+ - _examples
+
+output:
+ print-issued-lines: false
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - megacheck
+ - lll
+ - gocyclo
+ - dupl
+ - gochecknoglobals
+ - funlen
+ - godox
+ - wsl
+ - gomnd
+ - gocognit
+ - goerr113
+ - nolintlint
+ - testpackage
+ - godot
+ - nestif
+ - paralleltest
+ - nlreturn
+ - cyclop
+ - exhaustivestruct
+ - gci
+ - gofumpt
+ - errorlint
+ - exhaustive
+ - ifshort
+ - wrapcheck
+ - stylecheck
+ - thelper
+
+linters-settings:
+ govet:
+ check-shadowing: true
+ gocyclo:
+ min-complexity: 10
+ dupl:
+ threshold: 100
+ goconst:
+ min-len: 8
+ min-occurrences: 3
+ forbidigo:
+ #forbid:
+ # - (Must)?NewLexer$
+ exclude_godoc_examples: false
+
+
+issues:
+ max-per-linter: 0
+ max-same: 0
+ exclude-use-default: false
+ exclude:
+ # Captured by errcheck.
+ - '^(G104|G204):'
+ # Very commonly not checked.
+ - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked'
+ - 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos) should have comment or be unexported'
+ - 'composite literal uses unkeyed fields'
+ - 'declaration of "err" shadows declaration'
+ - 'should not use dot imports'
+ - 'Potential file inclusion via variable'
+ - 'should have comment or be unexported'
+ - 'comment on exported var .* should be of the form'
+ - 'at least one file in a package should have a package comment'
+ - 'string literal contains the Unicode'
+ - 'methods on the same type should have the same receiver name'
+ - '_TokenType_name should be _TokenTypeName'
+ - '`_TokenType_map` should be `_TokenTypeMap`'
+ - 'rewrite if-else to switch statement'
diff --git a/vendor/github.com/alecthomas/chroma/v2/.goreleaser.yml b/vendor/github.com/alecthomas/chroma/v2/.goreleaser.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8cd7592d3fb73c2c30c6267c809cde66480b7717
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/.goreleaser.yml
@@ -0,0 +1,37 @@
+project_name: chroma
+release:
+ github:
+ owner: alecthomas
+ name: chroma
+brews:
+ -
+ install: bin.install "chroma"
+env:
+ - CGO_ENABLED=0
+builds:
+- goos:
+ - linux
+ - darwin
+ - windows
+ goarch:
+ - arm64
+ - amd64
+ - "386"
+ goarm:
+ - "6"
+ dir: ./cmd/chroma
+ main: .
+ ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}}
+ binary: chroma
+archives:
+ -
+ format: tar.gz
+ name_template: '{{ .Binary }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{
+ .Arm }}{{ end }}'
+ files:
+ - COPYING
+ - README*
+snapshot:
+ name_template: SNAPSHOT-{{ .Commit }}
+checksum:
+ name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt'
diff --git a/vendor/github.com/alecthomas/chroma/v2/COPYING b/vendor/github.com/alecthomas/chroma/v2/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..92dc39f709123ccec3123d152f2e8e6126eabd7f
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/COPYING
@@ -0,0 +1,19 @@
+Copyright (C) 2017 Alec Thomas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/alecthomas/chroma/v2/Makefile b/vendor/github.com/alecthomas/chroma/v2/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..34e3c41c633f5b45ff70e7c34c49ede6d38bbcea
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/Makefile
@@ -0,0 +1,19 @@
+.PHONY: chromad upload all
+
+VERSION ?= $(shell git describe --tags --dirty --always)
+
+all: README.md tokentype_string.go
+
+README.md: lexers/*/*.go
+ ./table.py
+
+tokentype_string.go: types.go
+ go generate
+
+chromad:
+ rm -f chromad
+ (export CGOENABLED=0 GOOS=linux GOARCH=amd64; cd ./cmd/chromad && go build -ldflags="-X 'main.version=$(VERSION)'" -o ../../chromad .)
+
+upload: chromad
+ scp chromad root@swapoff.org: && \
+ ssh root@swapoff.org 'install -m755 ./chromad /srv/http/swapoff.org/bin && service chromad restart'
diff --git a/vendor/github.com/alecthomas/chroma/v2/README.md b/vendor/github.com/alecthomas/chroma/v2/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..445fb967d6cdea3bc0f4fb4d0328ba925712fcb6
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/README.md
@@ -0,0 +1,285 @@
+# Chroma — A general purpose syntax highlighter in pure Go
+[](https://godoc.org/github.com/alecthomas/chroma) [](https://github.com/alecthomas/chroma/actions/workflows/ci.yml) [](https://invite.slack.golangbridge.org/)
+
+> **NOTE:** As Chroma has just been released, its API is still in flux. That said, the high-level interface should not change significantly.
+
+Chroma takes source code and other structured text and converts it into syntax
+highlighted HTML, ANSI-coloured text, etc.
+
+Chroma is based heavily on [Pygments](http://pygments.org/), and includes
+translators for Pygments lexers and styles.
+
+
+## Table of Contents
+
+
+
+1. [Table of Contents](#table-of-contents)
+2. [Supported languages](#supported-languages)
+3. [Try it](#try-it)
+4. [Using the library](#using-the-library)
+ 1. [Quick start](#quick-start)
+ 2. [Identifying the language](#identifying-the-language)
+ 3. [Formatting the output](#formatting-the-output)
+ 4. [The HTML formatter](#the-html-formatter)
+5. [More detail](#more-detail)
+ 1. [Lexers](#lexers)
+ 2. [Formatters](#formatters)
+ 3. [Styles](#styles)
+6. [Command-line interface](#command-line-interface)
+7. [What's missing compared to Pygments?](#whats-missing-compared-to-pygments)
+
+
+
+
+## Supported languages
+
+Prefix | Language
+:----: | --------
+A | ABAP, ABNF, ActionScript, ActionScript 3, Ada, Angular2, ANTLR, ApacheConf, APL, AppleScript, Arduino, Awk
+B | Ballerina, Base Makefile, Bash, Batchfile, BibTeX, Bicep, BlitzBasic, BNF, Brainfuck
+C | C, C#, C++, Caddyfile, Caddyfile Directives, Cap'n Proto, Cassandra CQL, Ceylon, CFEngine3, cfstatement, ChaiScript, Cheetah, Clojure, CMake, COBOL, CoffeeScript, Common Lisp, Coq, Crystal, CSS, Cython
+D | D, Dart, Diff, Django/Jinja, Docker, DTD, Dylan
+E | EBNF, Elixir, Elm, EmacsLisp, Erlang
+F | Factor, Fish, Forth, Fortran, FSharp
+G | GAS, GDScript, Genshi, Genshi HTML, Genshi Text, Gherkin, GLSL, Gnuplot, Go, Go HTML Template, Go Text Template, GraphQL, Groff, Groovy
+H | Handlebars, Haskell, Haxe, HCL, Hexdump, HLB, HTML, HTTP, Hy
+I | Idris, Igor, INI, Io
+J | J, Java, JavaScript, JSON, Julia, Jungle
+K | Kotlin
+L | Lighttpd configuration file, LLVM, Lua
+M | Mako, markdown, Mason, Mathematica, Matlab, MiniZinc, MLIR, Modula-2, MonkeyC, MorrowindScript, Myghty, MySQL
+N | NASM, Newspeak, Nginx configuration file, Nim, Nix
+O | Objective-C, OCaml, Octave, OnesEnterprise, OpenEdge ABL, OpenSCAD, Org Mode
+P | PacmanConf, Perl, PHP, PHTML, Pig, PkgConfig, PL/pgSQL, plaintext, Pony, PostgreSQL SQL dialect, PostScript, POVRay, PowerShell, Prolog, PromQL, Protocol Buffer, Puppet, Python 2, Python
+Q | QBasic
+R | R, Racket, Ragel, Raku, react, ReasonML, reg, reStructuredText, Rexx, Ruby, Rust
+S | SAS, Sass, Scala, Scheme, Scilab, SCSS, Smalltalk, Smarty, Snobol, Solidity, SPARQL, SQL, SquidConf, Standard ML, Stylus, Svelte, Swift, SYSTEMD, systemverilog
+T | TableGen, TASM, Tcl, Tcsh, Termcap, Terminfo, Terraform, TeX, Thrift, TOML, TradingView, Transact-SQL, Turing, Turtle, Twig, TypeScript, TypoScript, TypoScriptCssData, TypoScriptHtmlData
+V | VB.net, verilog, VHDL, VimL, vue
+W | WDTE
+X | XML, Xorg
+Y | YAML, YANG
+Z | Zig
+
+
+_I will attempt to keep this section up to date, but an authoritative list can be
+displayed with `chroma --list`._
+
+
+## Try it
+
+Try out various languages and styles on the [Chroma Playground](https://swapoff.org/chroma/playground/).
+
+
+## Using the library
+
+Chroma, like Pygments, has the concepts of
+[lexers](https://github.com/alecthomas/chroma/tree/master/lexers),
+[formatters](https://github.com/alecthomas/chroma/tree/master/formatters) and
+[styles](https://github.com/alecthomas/chroma/tree/master/styles).
+
+Lexers convert source text into a stream of tokens, styles specify how token
+types are mapped to colours, and formatters convert tokens and styles into
+formatted output.
+
+A package exists for each of these, containing a global `Registry` variable
+with all of the registered implementations. There are also helper functions
+for using the registry in each package, such as looking up lexers by name or
+matching filenames, etc.
+
+In all cases, if a lexer, formatter or style can not be determined, `nil` will
+be returned. In this situation you may want to default to the `Fallback`
+value in each respective package, which provides sane defaults.
+
+
+### Quick start
+
+A convenience function exists that can be used to simply format some source
+text, without any effort:
+
+```go
+err := quick.Highlight(os.Stdout, someSourceCode, "go", "html", "monokai")
+```
+
+
+### Identifying the language
+
+To highlight code, you'll first have to identify what language the code is
+written in. There are three primary ways to do that:
+
+1. Detect the language from its filename.
+
+ ```go
+ lexer := lexers.Match("foo.go")
+ ```
+
+2. Explicitly specify the language by its Chroma syntax ID (a full list is available from `lexers.Names()`).
+
+ ```go
+ lexer := lexers.Get("go")
+ ```
+
+3. Detect the language from its content.
+
+ ```go
+ lexer := lexers.Analyse("package main\n\nfunc main()\n{\n}\n")
+ ```
+
+In all cases, `nil` will be returned if the language can not be identified.
+
+```go
+if lexer == nil {
+ lexer = lexers.Fallback
+}
+```
+
+At this point, it should be noted that some lexers can be extremely chatty. To
+mitigate this, you can use the coalescing lexer to coalesce runs of identical
+token types into a single token:
+
+```go
+lexer = chroma.Coalesce(lexer)
+```
+
+
+### Formatting the output
+
+Once a language is identified you will need to pick a formatter and a style (theme).
+
+```go
+style := styles.Get("swapoff")
+if style == nil {
+ style = styles.Fallback
+}
+formatter := formatters.Get("html")
+if formatter == nil {
+ formatter = formatters.Fallback
+}
+```
+
+Then obtain an iterator over the tokens:
+
+```go
+contents, err := ioutil.ReadAll(r)
+iterator, err := lexer.Tokenise(nil, string(contents))
+```
+
+And finally, format the tokens from the iterator:
+
+```go
+err := formatter.Format(w, style, iterator)
+```
+
+
+### The HTML formatter
+
+By default the `html` registered formatter generates standalone HTML with
+embedded CSS. More flexibility is available through the `formatters/html` package.
+
+Firstly, the output generated by the formatter can be customised with the
+following constructor options:
+
+- `Standalone()` - generate standalone HTML with embedded CSS.
+- `WithClasses()` - use classes rather than inlined style attributes.
+- `ClassPrefix(prefix)` - prefix each generated CSS class.
+- `TabWidth(width)` - Set the rendered tab width, in characters.
+- `WithLineNumbers()` - Render line numbers (style with `LineNumbers`).
+- `LinkableLineNumbers()` - Make the line numbers linkable and be a link to themselves.
+- `HighlightLines(ranges)` - Highlight lines in these ranges (style with `LineHighlight`).
+- `LineNumbersInTable()` - Use a table for formatting line numbers and code, rather than spans.
+
+If `WithClasses()` is used, the corresponding CSS can be obtained from the formatter with:
+
+```go
+formatter := html.New(html.WithClasses(true))
+err := formatter.WriteCSS(w, style)
+```
+
+
+## More detail
+
+
+### Lexers
+
+See the [Pygments documentation](http://pygments.org/docs/lexerdevelopment/)
+for details on implementing lexers. Most concepts apply directly to Chroma,
+but see existing lexer implementations for real examples.
+
+In many cases lexers can be automatically converted directly from Pygments by
+using the included Python 3 script `pygments2chroma.py`. I use something like
+the following:
+
+```sh
+python3 _tools/pygments2chroma.py \
+ pygments.lexers.jvm.KotlinLexer \
+ > lexers/k/kotlin.go \
+ && gofmt -s -w lexers/k/kotlin.go
+```
+
+See notes in [pygments-lexers.txt](https://github.com/alecthomas/chroma/blob/master/pygments-lexers.txt)
+for a list of lexers, and notes on some of the issues importing them.
+
+
+### Formatters
+
+Chroma supports HTML output, as well as terminal output in 8 colour, 256 colour, and true-colour.
+
+A `noop` formatter is included that outputs the token text only, and a `tokens`
+formatter outputs raw tokens. The latter is useful for debugging lexers.
+
+
+### Styles
+
+Chroma styles use the [same syntax](http://pygments.org/docs/styles/) as Pygments.
+
+All Pygments styles have been converted to Chroma using the `_tools/style.py` script.
+
+When you work with one of [Chroma's styles](https://github.com/alecthomas/chroma/tree/master/styles), know that the `chroma.Background` token type provides the default style for tokens. It does so by defining a foreground color and background color.
+
+For example, this gives each token name not defined in the style a default color of `#f8f8f8` and uses `#000000` for the highlighted code block's background:
+
+~~~go
+chroma.Background: "#f8f8f2 bg:#000000",
+~~~
+
+Also, token types in a style file are hierarchical. For instance, when `CommentSpecial` is not defined, Chroma uses the token style from `Comment`. So when several comment tokens use the same color, you'll only need to define `Comment` and override the one that has a different color.
+
+For a quick overview of the available styles and how they look, check out the [Chroma Style Gallery](https://xyproto.github.io/splash/docs/).
+
+
+## Command-line interface
+
+A command-line interface to Chroma is included.
+
+Binaries are available to install from [the releases page](https://github.com/alecthomas/chroma/releases).
+
+The CLI can be used as a preprocessor to colorise output of `less(1)`,
+see documentation for the `LESSOPEN` environment variable.
+
+The `--fail` flag can be used to suppress output and return with exit status
+1 to facilitate falling back to some other preprocessor in case chroma
+does not resolve a specific lexer to use for the given file. For example:
+
+```shell
+export LESSOPEN='| p() { chroma --fail "$1" || cat "$1"; }; p "%s"'
+```
+
+Replace `cat` with your favourite fallback preprocessor.
+
+When invoked as `.lessfilter`, the `--fail` flag is automatically turned
+on under the hood for easy integration with [lesspipe shipping with
+Debian and derivatives](https://manpages.debian.org/lesspipe#USER_DEFINED_FILTERS);
+for that setup the `chroma` executable can be just symlinked to `~/.lessfilter`.
+
+
+## What's missing compared to Pygments?
+
+- Quite a few lexers, for various reasons (pull-requests welcome):
+ - Pygments lexers for complex languages often include custom code to
+ handle certain aspects, such as Raku's ability to nest code inside
+ regular expressions. These require time and effort to convert.
+ - I mostly only converted languages I had heard of, to reduce the porting cost.
+- Some more esoteric features of Pygments are omitted for simplicity.
+- Though the Chroma API supports content detection, very few languages support them.
+ I have plans to implement a statistical analyser at some point, but not enough time.
diff --git a/vendor/github.com/alecthomas/chroma/v2/coalesce.go b/vendor/github.com/alecthomas/chroma/v2/coalesce.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5048951a290beeb85ac54abec83ac0f218d2060
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/coalesce.go
@@ -0,0 +1,35 @@
+package chroma
+
+// Coalesce is a Lexer interceptor that collapses runs of common types into a single token.
+func Coalesce(lexer Lexer) Lexer { return &coalescer{lexer} }
+
+type coalescer struct{ Lexer }
+
+func (d *coalescer) Tokenise(options *TokeniseOptions, text string) (Iterator, error) {
+ var prev Token
+ it, err := d.Lexer.Tokenise(options, text)
+ if err != nil {
+ return nil, err
+ }
+ return func() Token {
+ for token := it(); token != (EOF); token = it() {
+ if len(token.Value) == 0 {
+ continue
+ }
+ if prev == EOF {
+ prev = token
+ } else {
+ if prev.Type == token.Type && len(prev.Value) < 8192 {
+ prev.Value += token.Value
+ } else {
+ out := prev
+ prev = token
+ return out
+ }
+ }
+ }
+ out := prev
+ prev = EOF
+ return out
+ }, nil
+}
diff --git a/vendor/github.com/alecthomas/chroma/v2/colour.go b/vendor/github.com/alecthomas/chroma/v2/colour.go
new file mode 100644
index 0000000000000000000000000000000000000000..15d794ce2a7288655402e43e945bc00776cbbcb0
--- /dev/null
+++ b/vendor/github.com/alecthomas/chroma/v2/colour.go
@@ -0,0 +1,164 @@
+package chroma
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+)
+
+// ANSI2RGB maps ANSI colour names, as supported by Chroma, to hex RGB values.
+var ANSI2RGB = map[string]string{
+ // Dark
+ "#ansiblack": "000000",
+ "#ansidarkred": "7f0000",
+ "#ansidarkgreen": "007f00",
+ // NOTE(review): "7f7fe0" is a blue-ish value for "brown"; the conventional
+ // ANSI dark-yellow would be "7f7f00". Verify against upstream before
+ // changing vendored code.
+ "#ansibrown": "7f7fe0",
+ "#ansidarkblue": "00007f",
+ "#ansipurple": "7f007f",
+ "#ansiteal": "007f7f",
+ "#ansilightgray": "e5e5e5",
+ // Normal
+ "#ansidarkgray": "555555",
+ "#ansired": "ff0000",
+ "#ansigreen": "00ff00",
+ "#ansiyellow": "ffff00",
+ "#ansiblue": "0000ff",
+ "#ansifuchsia": "ff00ff",
+ "#ansiturquoise": "00ffff",
+ "#ansiwhite": "ffffff",
+
+ // Aliases without the "ansi" prefix, because...why?
+ "#black": "000000",
+ "#darkred": "7f0000",
+ "#darkgreen": "007f00",
+ "#brown": "7f7fe0",
+ "#darkblue": "00007f",
+ "#purple": "7f007f",
+ "#teal": "007f7f",
+ "#lightgray": "e5e5e5",
+ // Normal
+ "#darkgray": "555555",
+ "#red": "ff0000",
+ "#green": "00ff00",
+ "#yellow": "ffff00",
+ "#blue": "0000ff",
+ "#fuchsia": "ff00ff",
+ "#turquoise": "00ffff",
+ "#white": "ffffff",
+}
+
+// Colour represents an RGB colour.
+//
+// NOTE(review): the int32 encoding is defined by ParseColour, which is not
+// fully visible in this chunk; construct values via NewColour/ParseColour.
+type Colour int32
+
+// NewColour creates a Colour directly from RGB values.
+//
+// The components are rendered as a 6-digit lowercase hex string and handed
+// to ParseColour for encoding.
+func NewColour(r, g, b uint8) Colour {
+ return ParseColour(fmt.Sprintf("%02x%02x%02x", r, g, b))
+}
+
+// Distance between this colour and another.
+//
+// This uses the approach described here (https://www.compuphase.com/cmetric.htm).
+// This is not as accurate as LAB, et. al. but is *vastly* simpler and sufficient for our needs.
+func (c Colour) Distance(e2 Colour) float64 {
+ ar, ag, ab := int64(c.Red()), int64(c.Green()), int64(c.Blue())
+ br, bg, bb := int64(e2.Red()), int64(e2.Green()), int64(e2.Blue())
+ // "redmean" weighting: the red and blue contributions are scaled by the
+ // mean red level so the metric better tracks perceived difference.
+ rmean := (ar + br) / 2
+ r := ar - br
+ g := ag - bg
+ b := ab - bb
+ return math.Sqrt(float64((((512 + rmean) * r * r) >> 8) + 4*g*g + (((767 - rmean) * b * b) >> 8)))
+}
+
+// Brighten returns a copy of this colour with its brightness adjusted.
+//
+// If factor is negative, the colour is darkened.
+//
+// Uses approach described here (http://www.pvladov.com/2012/09/make-color-lighter-or-darker.html).
+func (c Colour) Brighten(factor float64) Colour {
+ r := float64(c.Red())
+ g := float64(c.Green())
+ b := float64(c.Blue())
+
+ if factor < 0 {
+ // Darken: scale each channel towards 0 by (1 + factor).
+ factor++
+ r *= factor
+ g *= factor
+ b *= factor
+ } else {
+ // Lighten: interpolate each channel towards 255 by factor.
+ r = (255-r)*factor + r
+ g = (255-g)*factor + g
+ b = (255-b)*factor + b
+ }
+ // NOTE(review): assumes factor is within [-1, 1]; values outside that
+ // range can wrap in the uint8 conversions below — confirm callers.
+ return NewColour(uint8(r), uint8(g), uint8(b))
+}
+
+// BrightenOrDarken brightens a colour if it is < 0.5 brightness or darkens if > 0.5 brightness.
+func (c Colour) BrightenOrDarken(factor float64) Colour {
+ if c.Brightness() < 0.5 {
+ return c.Brighten(factor)
+ }
+ return c.Brighten(-factor)
+}
+
+// Brightness of the colour (roughly) in the range 0.0 to 1.0.
+//
+// Computed as the mean of the three channels, each normalised by 255.
+func (c Colour) Brightness() float64 {
+ return (float64(c.Red()) + float64(c.Green()) + float64(c.Blue())) / 255.0 / 3.0
+}
+
+// ParseColour in the forms #rgb, #rrggbb, #ansi`, styleAttr)
+ }
+
+ return ``
+ },
+ end: func(code bool) string {
+ if code {
+ return ``
+ }
+
+ return ``
+ },
+ }
+ }
+}
+
+// WithPreWrapper allows control of the surrounding pre tags.
+func WithPreWrapper(wrapper PreWrapper) Option {
+ return func(f *Formatter) {
+ f.preWrapper = wrapper
+ }
+}
+
+// WrapLongLines wraps long lines.
+func WrapLongLines(b bool) Option {
+ return func(f *Formatter) {
+ f.wrapLongLines = b
+ }
+}
+
+// WithLineNumbers formats output with line numbers.
+func WithLineNumbers(b bool) Option {
+ return func(f *Formatter) {
+ f.lineNumbers = b
+ }
+}
+
+// LineNumbersInTable will, when combined with WithLineNumbers, separate the line numbers
+// and code in table td's, which make them copy-and-paste friendly.
+func LineNumbersInTable(b bool) Option {
+ return func(f *Formatter) {
+ f.lineNumbersInTable = b
+ }
+}
+
+// LinkableLineNumbers decorates the line numbers HTML elements with an "id"
+// attribute so they can be linked.
+//
+// prefix is prepended to each line number to build the id value.
+func LinkableLineNumbers(b bool, prefix string) Option {
+ return func(f *Formatter) {
+ f.linkableLineNumbers = b
+ f.lineNumbersIDPrefix = prefix
+ }
+}
+
+// HighlightLines highlights the given line ranges with the Highlight style.
+//
+// A range is the beginning and ending of a range as 1-based line numbers, inclusive.
+// Ranges are sorted by start line so later rendering can scan them in order.
+func HighlightLines(ranges [][2]int) Option {
+ return func(f *Formatter) {
+ f.highlightRanges = ranges
+ sort.Sort(f.highlightRanges)
+ }
+}
+
+// BaseLineNumber sets the initial number to start line numbering at. Defaults to 1.
+func BaseLineNumber(n int) Option {
+ return func(f *Formatter) {
+ f.baseLineNumber = n
+ }
+}
+
+// New HTML formatter.
+//
+// By default line numbering starts at 1 and output uses defaultPreWrapper;
+// both can be overridden via the supplied options.
+func New(options ...Option) *Formatter {
+ f := &Formatter{
+ baseLineNumber: 1,
+ preWrapper: defaultPreWrapper,
+ }
+ for _, option := range options {
+ option(f)
+ }
+ return f
+}
+
+// PreWrapper defines the operations supported in WithPreWrapper.
+type PreWrapper interface {
+ // Start is called to write a start element.
+ // The code flag tells whether this block surrounds
+ // highlighted code. This will be false when surrounding
+ // line numbers.
+ Start(code bool, styleAttr string) string
+
+ // End is called to write the end
element.
+ End(code bool) string
+}
+
+// preWrapper is a function-backed implementation of the PreWrapper
+// interface, letting callers supply start/end callbacks directly.
+type preWrapper struct {
+ start func(code bool, styleAttr string) string
+ end func(code bool) string
+}
+
+// Start delegates to the configured start callback.
+func (p preWrapper) Start(code bool, styleAttr string) string {
+ return p.start(code, styleAttr)
+}
+
+// End delegates to the configured end callback.
+func (p preWrapper) End(code bool) string {
+ return p.end(code)
+}
+
+var (
+ nopPreWrapper = preWrapper{
+ start: func(code bool, styleAttr string) string { return "" },
+ end: func(code bool) string { return "" },
+ }
+ defaultPreWrapper = preWrapper{
+ start: func(code bool, styleAttr string) string {
+ if code {
+ return fmt.Sprintf(`
`
+ }
+
+ return ``
+ },
+ }
+)
+
+// Formatter that generates HTML.
+type Formatter struct {
+ standalone bool
+ prefix string
+ Classes bool // Exported field to detect when classes are being used
+ allClasses bool
+ customCSS map[chroma.TokenType]string
+ preWrapper PreWrapper
+ inlineCode bool
+ preventSurroundingPre bool
+ tabWidth int
+ wrapLongLines bool
+ lineNumbers bool
+ lineNumbersInTable bool
+ linkableLineNumbers bool
+ lineNumbersIDPrefix string
+ highlightRanges highlightRanges
+ baseLineNumber int
+}
+
+// highlightRanges is a set of 1-based inclusive [start, end] line ranges.
+// It implements sort.Interface, ordering ranges by their start line.
+type highlightRanges [][2]int
+
+func (h highlightRanges) Len() int { return len(h) }
+func (h highlightRanges) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+func (h highlightRanges) Less(i, j int) bool { return h[i][0] < h[j][0] }
+
+// Format renders the iterator's tokens to w using the given style.
+func (f *Formatter) Format(w io.Writer, style *chroma.Style, iterator chroma.Iterator) (err error) {
+ return f.writeHTML(w, style, iterator.Tokens())
+}
+
+// We deliberately don't use html/template here because it is two orders of magnitude slower (benchmarked).
+//
+// OTOH we need to be super careful about correct escaping...
+func (f *Formatter) writeHTML(w io.Writer, style *chroma.Style, tokens []chroma.Token) (err error) { // nolint: gocyclo
+ css := f.styleToCSS(style)
+ if !f.Classes {
+ for t, style := range css {
+ css[t] = compressStyle(style)
+ }
+ }
+ if f.standalone {
+ fmt.Fprint(w, "\n")
+ if f.Classes {
+ fmt.Fprint(w, "")
+ }
+ fmt.Fprintf(w, "\n", f.styleAttr(css, chroma.Background))
+ }
+
+ wrapInTable := f.lineNumbers && f.lineNumbersInTable
+
+ lines := chroma.SplitTokensIntoLines(tokens)
+ lineDigits := len(fmt.Sprintf("%d", f.baseLineNumber+len(lines)-1))
+ highlightIndex := 0
+
+ if wrapInTable {
+ // List line numbers in its own `, styleAttr)
+ }
+
+ return fmt.Sprintf(``, styleAttr)
+ },
+ end: func(code bool) string {
+ if code {
+ return `
+ fmt.Fprintf(w, "
\n")
+ fmt.Fprint(w, "", f.styleAttr(css, chroma.LineTable))
+ fmt.Fprintf(w, " \n", f.styleAttr(css, chroma.LineTableTD))
+ fmt.Fprintf(w, f.preWrapper.Start(false, f.styleAttr(css, chroma.PreWrapper)))
+ for index := range lines {
+ line := f.baseLineNumber + index
+ highlight, next := f.shouldHighlight(highlightIndex, line)
+ if next {
+ highlightIndex++
+ }
+ if highlight {
+ fmt.Fprintf(w, "", f.styleAttr(css, chroma.LineHighlight))
+ }
+
+ fmt.Fprintf(w, "%s\n", f.styleAttr(css, chroma.LineNumbersTable), f.lineIDAttribute(line), f.lineTitleWithLinkIfNeeded(lineDigits, line))
+
+ if highlight {
+ fmt.Fprintf(w, "")
+ }
+ }
+ fmt.Fprint(w, f.preWrapper.End(false))
+ fmt.Fprint(w, " \n")
+ fmt.Fprintf(w, "\n", f.styleAttr(css, chroma.LineTableTD, "width:100%"))
+ }
+
+ fmt.Fprintf(w, f.preWrapper.Start(true, f.styleAttr(css, chroma.PreWrapper)))
+
+ highlightIndex = 0
+ for index, tokens := range lines {
+ // 1-based line number.
+ line := f.baseLineNumber + index
+ highlight, next := f.shouldHighlight(highlightIndex, line)
+ if next {
+ highlightIndex++
+ }
+
+ if !(f.preventSurroundingPre || f.inlineCode) {
+ // Start of Line
+ fmt.Fprint(w, ``)
+ } else {
+ fmt.Fprintf(w, "%s>", f.styleAttr(css, chroma.Line))
+ }
+
+ // Line number
+ if f.lineNumbers && !wrapInTable {
+ fmt.Fprintf(w, "%s", f.styleAttr(css, chroma.LineNumbers), f.lineIDAttribute(line), f.lineTitleWithLinkIfNeeded(lineDigits, line))
+ }
+
+ fmt.Fprintf(w, ``, f.styleAttr(css, chroma.CodeLine))
+ }
+
+ for _, token := range tokens {
+ html := html.EscapeString(token.String())
+ attr := f.styleAttr(css, token.Type)
+ if attr != "" {
+ html = fmt.Sprintf("%s", attr, html)
+ }
+ fmt.Fprint(w, html)
+ }
+
+ if !(f.preventSurroundingPre || f.inlineCode) {
+ fmt.Fprint(w, ``) // End of CodeLine
+
+ fmt.Fprint(w, ``) // End of Line
+ }
+ }
+ fmt.Fprintf(w, f.preWrapper.End(true))
+
+ if wrapInTable {
+ fmt.Fprint(w, " , B
+
+
+
+ go-arg
+
+Struct-based argument parsing for Go
+
+
+
+Declare command line arguments for your program by defining a struct.
+
+```go
+var args struct {
+ Foo string
+ Bar bool
+}
+arg.MustParse(&args)
+fmt.Println(args.Foo, args.Bar)
+```
+
+```shell
+$ ./example --foo=hello --bar
+hello true
+```
+
+### Installation
+
+```shell
+go get github.com/alexflint/go-arg
+```
+
+### Required arguments
+
+```go
+var args struct {
+ ID int `arg:"required"`
+ Timeout time.Duration
+}
+arg.MustParse(&args)
+```
+
+```shell
+$ ./example
+Usage: example --id ID [--timeout TIMEOUT]
+error: --id is required
+```
+
+### Positional arguments
+
+```go
+var args struct {
+ Input string `arg:"positional"`
+ Output []string `arg:"positional"`
+}
+arg.MustParse(&args)
+fmt.Println("Input:", args.Input)
+fmt.Println("Output:", args.Output)
+```
+
+```
+$ ./example src.txt x.out y.out z.out
+Input: src.txt
+Output: [x.out y.out z.out]
+```
+
+### Environment variables
+
+```go
+var args struct {
+ Workers int `arg:"env"`
+}
+arg.MustParse(&args)
+fmt.Println("Workers:", args.Workers)
+```
+
+```
+$ WORKERS=4 ./example
+Workers: 4
+```
+
+```
+$ WORKERS=4 ./example --workers=6
+Workers: 6
+```
+
+You can also override the name of the environment variable:
+
+```go
+var args struct {
+ Workers int `arg:"env:NUM_WORKERS"`
+}
+arg.MustParse(&args)
+fmt.Println("Workers:", args.Workers)
+```
+
+```
+$ NUM_WORKERS=4 ./example
+Workers: 4
+```
+
+You can provide multiple values using the CSV (RFC 4180) format:
+
+```go
+var args struct {
+ Workers []int `arg:"env"`
+}
+arg.MustParse(&args)
+fmt.Println("Workers:", args.Workers)
+```
+
+```
+$ WORKERS='1,99' ./example
+Workers: [1 99]
+```
+
+### Usage strings
+```go
+var args struct {
+ Input string `arg:"positional"`
+ Output []string `arg:"positional"`
+ Verbose bool `arg:"-v,--verbose" help:"verbosity level"`
+ Dataset string `help:"dataset to use"`
+ Optimize int `arg:"-O" help:"optimization level"`
+}
+arg.MustParse(&args)
+```
+
+```shell
+$ ./example -h
+Usage: [--verbose] [--dataset DATASET] [--optimize OPTIMIZE] [--help] INPUT [OUTPUT [OUTPUT ...]]
+
+Positional arguments:
+ INPUT
+ OUTPUT
+
+Options:
+ --verbose, -v verbosity level
+ --dataset DATASET dataset to use
+ --optimize OPTIMIZE, -O OPTIMIZE
+ optimization level
+ --help, -h print this help message
+```
+
+### Default values
+
+```go
+var args struct {
+ Foo string `default:"abc"`
+ Bar bool
+}
+arg.MustParse(&args)
+```
+
+### Default values (before v1.2)
+
+```go
+var args struct {
+ Foo string
+ Bar bool
+}
+arg.Foo = "abc"
+arg.MustParse(&args)
+```
+
+### Combining command line options, environment variables, and default values
+
+You can combine command line arguments, environment variables, and default values. Command line arguments take precedence over environment variables, which take precedence over default values. This means that we check whether a certain option was provided on the command line, then if not, we check for an environment variable (only if an `env` tag was provided), then if none is found, we check for a `default` tag containing a default value.
+
+```go
+var args struct {
+ Test string `arg:"-t,env:TEST" default:"something"`
+}
+arg.MustParse(&args)
+```
+
+### Arguments with multiple values
+```go
+var args struct {
+ Database string
+ IDs []int64
+}
+arg.MustParse(&args)
+fmt.Printf("Fetching the following IDs from %s: %q", args.Database, args.IDs)
+```
+
+```shell
+./example -database foo -ids 1 2 3
+Fetching the following IDs from foo: [1 2 3]
+```
+
+### Arguments that can be specified multiple times, mixed with positionals
+```go
+var args struct {
+ Commands []string `arg:"-c,separate"`
+ Files []string `arg:"-f,separate"`
+ Databases []string `arg:"positional"`
+}
+arg.MustParse(&args)
+```
+
+```shell
+./example -c cmd1 db1 -f file1 db2 -c cmd2 -f file2 -f file3 db3 -c cmd3
+Commands: [cmd1 cmd2 cmd3]
+Files [file1 file2 file3]
+Databases [db1 db2 db3]
+```
+
+### Arguments with keys and values
+```go
+var args struct {
+ UserIDs map[string]int
+}
+arg.MustParse(&args)
+fmt.Println(args.UserIDs)
+```
+
+```shell
+./example --userids john=123 mary=456
+map[john:123 mary:456]
+```
+
+### Custom validation
+```go
+var args struct {
+ Foo string
+ Bar string
+}
+p := arg.MustParse(&args)
+if args.Foo == "" && args.Bar == "" {
+ p.Fail("you must provide either --foo or --bar")
+}
+```
+
+```shell
+./example
+Usage: samples [--foo FOO] [--bar BAR]
+error: you must provide either --foo or --bar
+```
+
+### Version strings
+
+```go
+type args struct {
+ ...
+}
+
+func (args) Version() string {
+ return "someprogram 4.3.0"
+}
+
+func main() {
+ var args args
+ arg.MustParse(&args)
+}
+```
+
+```shell
+$ ./example --version
+someprogram 4.3.0
+```
+
+### Overriding option names
+
+```go
+var args struct {
+ Short string `arg:"-s"`
+ Long string `arg:"--custom-long-option"`
+ ShortAndLong string `arg:"-x,--my-option"`
+ OnlyShort string `arg:"-o,--"`
+}
+arg.MustParse(&args)
+```
+
+```shell
+$ ./example --help
+Usage: example [-o ONLYSHORT] [--short SHORT] [--custom-long-option CUSTOM-LONG-OPTION] [--my-option MY-OPTION]
+
+Options:
+ --short SHORT, -s SHORT
+ --custom-long-option CUSTOM-LONG-OPTION
+ --my-option MY-OPTION, -x MY-OPTION
+ -o ONLYSHORT
+ --help, -h display this help and exit
+```
+
+
+### Embedded structs
+
+The fields of embedded structs are treated just like regular fields:
+
+```go
+
+type DatabaseOptions struct {
+ Host string
+ Username string
+ Password string
+}
+
+type LogOptions struct {
+ LogFile string
+ Verbose bool
+}
+
+func main() {
+ var args struct {
+ DatabaseOptions
+ LogOptions
+ }
+ arg.MustParse(&args)
+}
+```
+
+As usual, any field tagged with `arg:"-"` is ignored.
+
+### Supported types
+
+The following types may be used as arguments:
+- built-in integer types: `int, int8, int16, int32, int64, byte, rune`
+- built-in floating point types: `float32, float64`
+- strings
+- booleans
+- URLs represented as `url.URL`
+- time durations represented as `time.Duration`
+- email addresses represented as `mail.Address`
+- MAC addresses represented as `net.HardwareAddr`
+- pointers to any of the above
+- slices of any of the above
+- maps using any of the above as keys and values
+- any type that implements `encoding.TextUnmarshaler`
+
+### Custom parsing
+
+Implement `encoding.TextUnmarshaler` to define your own parsing logic.
+
+```go
+// Accepts command line arguments of the form "head.tail"
+type NameDotName struct {
+ Head, Tail string
+}
+
+func (n *NameDotName) UnmarshalText(b []byte) error {
+ s := string(b)
+ pos := strings.Index(s, ".")
+ if pos == -1 {
+ return fmt.Errorf("missing period in %s", s)
+ }
+ n.Head = s[:pos]
+ n.Tail = s[pos+1:]
+ return nil
+}
+
+func main() {
+ var args struct {
+ Name NameDotName
+ }
+ arg.MustParse(&args)
+ fmt.Printf("%#v\n", args.Name)
+}
+```
+```shell
+$ ./example --name=foo.bar
+main.NameDotName{Head:"foo", Tail:"bar"}
+
+$ ./example --name=oops
+Usage: example [--name NAME]
+error: error processing --name: missing period in "oops"
+```
+
+### Custom parsing with default values
+
+Implement `encoding.TextMarshaler` to define your own default value strings:
+
+```go
+// Accepts command line arguments of the form "head.tail"
+type NameDotName struct {
+ Head, Tail string
+}
+
+func (n *NameDotName) UnmarshalText(b []byte) error {
+ // same as previous example
+}
+
+// this is only needed if you want to display a default value in the usage string
+func (n *NameDotName) MarshalText() ([]byte, error) {
+ return []byte(fmt.Sprintf("%s.%s", n.Head, n.Tail)), nil
+}
+
+func main() {
+ var args struct {
+ Name NameDotName `default:"file.txt"`
+ }
+ arg.MustParse(&args)
+ fmt.Printf("%#v\n", args.Name)
+}
+```
+```shell
+$ ./example --help
+Usage: test [--name NAME]
+
+Options:
+ --name NAME [default: file.txt]
+ --help, -h display this help and exit
+
+$ ./example
+main.NameDotName{Head:"file", Tail:"txt"}
+```
+
+### Custom placeholders
+
+*Introduced in version 1.3.0*
+
+Use the `placeholder` tag to control which placeholder text is used in the usage text.
+
+```go
+var args struct {
+ Input string `arg:"positional" placeholder:"SRC"`
+ Output []string `arg:"positional" placeholder:"DST"`
+ Optimize int `arg:"-O" help:"optimization level" placeholder:"LEVEL"`
+ MaxJobs int `arg:"-j" help:"maximum number of simultaneous jobs" placeholder:"N"`
+}
+arg.MustParse(&args)
+```
+```shell
+$ ./example -h
+Usage: example [--optimize LEVEL] [--maxjobs N] SRC [DST [DST ...]]
+
+Positional arguments:
+ SRC
+ DST
+
+Options:
+ --optimize LEVEL, -O LEVEL
+ optimization level
+ --maxjobs N, -j N maximum number of simultaneous jobs
+ --help, -h display this help and exit
+```
+
+### Description strings
+
+```go
+type args struct {
+ Foo string
+}
+
+func (args) Description() string {
+ return "this program does this and that"
+}
+
+func main() {
+ var args args
+ arg.MustParse(&args)
+}
+```
+
+```shell
+$ ./example -h
+this program does this and that
+Usage: example [--foo FOO]
+
+Options:
+ --foo FOO
+ --help, -h display this help and exit
+```
+
+### Subcommands
+
+*Introduced in version 1.1.0*
+
+Subcommands are commonly used in tools that wish to group multiple functions into a single program. An example is the `git` tool:
+```shell
+$ git checkout [arguments specific to checking out code]
+$ git commit [arguments specific to committing]
+$ git push [arguments specific to pushing]
+```
+
+The strings "checkout", "commit", and "push" are different from simple positional arguments because the options available to the user change depending on which subcommand they choose.
+
+This can be implemented with `go-arg` as follows:
+
+```go
+type CheckoutCmd struct {
+ Branch string `arg:"positional"`
+ Track bool `arg:"-t"`
+}
+type CommitCmd struct {
+ All bool `arg:"-a"`
+ Message string `arg:"-m"`
+}
+type PushCmd struct {
+ Remote string `arg:"positional"`
+ Branch string `arg:"positional"`
+ SetUpstream bool `arg:"-u"`
+}
+var args struct {
+ Checkout *CheckoutCmd `arg:"subcommand:checkout"`
+ Commit *CommitCmd `arg:"subcommand:commit"`
+ Push *PushCmd `arg:"subcommand:push"`
+ Quiet bool `arg:"-q"` // this flag is global to all subcommands
+}
+
+arg.MustParse(&args)
+
+switch {
+case args.Checkout != nil:
+ fmt.Printf("checkout requested for branch %s\n", args.Checkout.Branch)
+case args.Commit != nil:
+ fmt.Printf("commit requested with message \"%s\"\n", args.Commit.Message)
+case args.Push != nil:
+ fmt.Printf("push requested from %s to %s\n", args.Push.Branch, args.Push.Remote)
+}
+```
+
+Some additional rules apply when working with subcommands:
+* The `subcommand` tag can only be used with fields that are pointers to structs
+* Any struct that contains a subcommand must not contain any positionals
+
+This package allows a program to accept subcommands while also doing something else
+when no subcommand is specified.
+If, on the other hand, you want the program to terminate when no subcommand is specified,
+the recommended way is:
+
+```go
+p := arg.MustParse(&args)
+if p.Subcommand() == nil {
+ p.Fail("missing subcommand")
+}
+```
+
+### API Documentation
+
+https://godoc.org/github.com/alexflint/go-arg
+
+### Rationale
+
+There are many command line argument parsing libraries for Go, including one in the standard library, so why build another?
+
+The `flag` library that ships in the standard library seems awkward to me. Positional arguments must precede options, so `./prog x --foo=1` does what you expect but `./prog --foo=1 x` does not. It also does not allow arguments to have both long (`--foo`) and short (`-f`) forms.
+
+Many third-party argument parsing libraries are great for writing sophisticated command line interfaces, but feel to me like overkill for a simple script with a few flags.
+
+The idea behind `go-arg` is that Go already has an excellent way to describe data structures using structs, so there is no need to develop additional levels of abstraction. Instead of one API to specify which arguments your program accepts, and then another API to get the values of those arguments, `go-arg` replaces both with a single struct.
+
+### Backward compatibility notes
+
+Earlier versions of this library required the help text to be part of the `arg` tag. This is still supported but is now deprecated. Instead, you should use a separate `help` tag, described above, which removes most of the limits on the text you can write. In particular, you will need to use the new `help` tag if your help text includes any commas.
diff --git a/vendor/github.com/alexflint/go-arg/doc.go b/vendor/github.com/alexflint/go-arg/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b0bafd421a6bff3eed5b7e9a3ddd900bb601b13
--- /dev/null
+++ b/vendor/github.com/alexflint/go-arg/doc.go
@@ -0,0 +1,39 @@
+// Package arg parses command line arguments using the fields from a struct.
+//
+// For example,
+//
+// var args struct {
+// Iter int
+// Debug bool
+// }
+// arg.MustParse(&args)
+//
+// defines two command line arguments, which can be set using any of
+//
+// ./example --iter=1 --debug // debug is a boolean flag so its value is set to true
+// ./example -iter 1 // debug defaults to its zero value (false)
+// ./example --debug=true // iter defaults to its zero value (zero)
+//
+// The fastest way to see how to use go-arg is to read the examples below.
+//
+// Fields can be bool, string, any float type, or any signed or unsigned integer type.
+// They can also be slices of any of the above, or slices of pointers to any of the above.
+//
+// Tags can be specified using the `arg` and `help` tag names:
+//
+// var args struct {
+// Input string `arg:"positional"`
+// Log string `arg:"positional,required"`
+// Debug bool `arg:"-d" help:"turn on debug mode"`
+// RealMode bool `arg:"--real"`
+// Wr io.Writer `arg:"-"`
+// }
+//
+// Any tag string that starts with a single hyphen is the short form for an argument
+// (e.g. `./example -d`), and any tag string that starts with two hyphens is the long
+// form for the argument (instead of the field name).
+//
+// Other valid tag strings are `positional` and `required`.
+//
+// Fields can be excluded from processing with `arg:"-"`.
+package arg
diff --git a/vendor/github.com/alexflint/go-arg/parse.go b/vendor/github.com/alexflint/go-arg/parse.go
new file mode 100644
index 0000000000000000000000000000000000000000..7588dfb74a749c0788d9d5bd379363b46932d1ff
--- /dev/null
+++ b/vendor/github.com/alexflint/go-arg/parse.go
@@ -0,0 +1,729 @@
+package arg
+
+import (
+ "encoding"
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ scalar "github.com/alexflint/go-scalar"
+)
+
+// path represents a sequence of steps to find the output location for an
+// argument or subcommand in the final destination struct
+type path struct {
+ root int // index of the destination struct
+ fields []reflect.StructField // sequence of struct fields to traverse
+}
+
+// String gets a string representation of the given path,
+// e.g. "args.Foo.Bar" for fields Foo then Bar.
+func (p path) String() string {
+ s := "args"
+ for _, f := range p.fields {
+ s += "." + f.Name
+ }
+ return s
+}
+
+// Child gets a new path representing a child of this path.
+//
+// The fields slice is copied into fresh storage so sibling paths derived
+// from the same parent can never share (and overwrite) a backing array.
+func (p path) Child(f reflect.StructField) path {
+ // copy the entire slice of fields to avoid possible slice overwrite
+ subfields := make([]reflect.StructField, len(p.fields)+1)
+ copy(subfields, p.fields)
+ subfields[len(subfields)-1] = f
+ return path{
+ root: p.root,
+ fields: subfields,
+ }
+}
+
+// spec represents a command line option
+type spec struct {
+ dest path
+ field reflect.StructField // the struct field from which this option was created
+ long string // the --long form for this option, or empty if none
+ short string // the -s short form for this option, or empty if none
+ cardinality cardinality // determines how many tokens will be present (possible values: zero, one, multiple)
+ required bool // if true, this option must be present on the command line
+ positional bool // if true, this option will be looked for in the positional flags
+ separate bool // if true, each slice and map entry will have its own --flag
+ help string // the help text for this option
+ env string // the name of the environment variable for this option, or empty for none
+ defaultVal string // default value for this option
+ placeholder string // name of the data in help
+}
+
+// command represents a named subcommand, or the top-level command
+type command struct {
+ name string // the name typed on the command line (or the program name for the root)
+ help string // help text shown for this subcommand
+ dest path // location of the subcommand struct in the destination
+ specs []*spec // options declared directly on this command
+ subcommands []*command // nested subcommands
+ parent *command // enclosing command, or nil for the root
+}
+
+// ErrHelp indicates that -h or --help were provided
+var ErrHelp = errors.New("help requested by user")
+
+// ErrVersion indicates that --version was provided
+var ErrVersion = errors.New("version requested by user")
+
+// MustParse processes command line arguments and exits upon failure
+//
+// On a construction error it prints the error and exits with code -1; on
+// -h/--help or --version it prints the requested text and exits with 0.
+// Output goes to the package-level stdout writer and exit goes through
+// osExit, both defined elsewhere in this package (presumably replaceable
+// in tests — TODO confirm).
+func MustParse(dest ...interface{}) *Parser {
+ p, err := NewParser(Config{}, dest...)
+ if err != nil {
+ fmt.Fprintln(stdout, err)
+ osExit(-1)
+ return nil // just in case osExit was monkey-patched
+ }
+
+ err = p.Parse(flags())
+ switch {
+ case err == ErrHelp:
+ // Help requested: show help for the most recently parsed
+ // (sub)command and exit successfully.
+ p.writeHelpForSubcommand(stdout, p.lastCmd)
+ osExit(0)
+ case err == ErrVersion:
+ fmt.Fprintln(stdout, p.version)
+ osExit(0)
+ case err != nil:
+ p.failWithSubcommand(err.Error(), p.lastCmd)
+ }
+
+ return p
+}
+
+// Parse processes command line arguments and stores them in dest
+//
+// Unlike MustParse, it returns errors (including ErrHelp and ErrVersion)
+// to the caller instead of printing and exiting.
+func Parse(dest ...interface{}) error {
+ p, err := NewParser(Config{}, dest...)
+ if err != nil {
+ return err
+ }
+ return p.Parse(flags())
+}
+
+// flags gets all command line arguments other than the first (program name)
+func flags() []string {
+ if len(os.Args) == 0 { // os.Args could be empty
+ return nil
+ }
+ return os.Args[1:]
+}
+
+// Config represents configuration options for an argument parser
+type Config struct {
+ // Program is the name of the program used in the help text
+ Program string
+
+ // IgnoreEnv instructs the library not to read environment variables
+ IgnoreEnv bool
+}
+
+// Parser represents a set of command line options with destination values
+type Parser struct {
+ cmd *command // the top-level command
+ roots []reflect.Value // one destination value per struct passed to NewParser
+ config Config
+ version string // from Versioned, if the destination implements it
+ description string // from Described, if the destination implements it
+
+ // the following field changes during processing of command line arguments
+ lastCmd *command
+}
+
+// Versioned is the interface that the destination struct should implement to
+// make a version string appear at the top of the help message.
+type Versioned interface {
+ // Version returns the version string that will be printed on a line by itself
+ // at the top of the help message.
+ Version() string
+}
+
+// Described is the interface that the destination struct should implement to
+// make a description string appear at the top of the help message.
+type Described interface {
+ // Description returns the string that will be printed on a line by itself
+ // at the top of the help message.
+ Description() string
+}
+
+// walkFields calls a function for each field of a struct, recursively expanding struct fields.
+//
+// visit receives each field plus the struct type that declares it; its
+// return value controls whether a struct-typed field is recursed into.
+func walkFields(t reflect.Type, visit func(field reflect.StructField, owner reflect.Type) bool) {
+ walkFieldsImpl(t, visit, nil)
+}
+
+func walkFieldsImpl(t reflect.Type, visit func(field reflect.StructField, owner reflect.Type) bool, path []int) {
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ // Rewrite Index to the full index path from the root struct so the
+ // field can later be addressed with FieldByIndex.
+ field.Index = make([]int, len(path)+1)
+ copy(field.Index, append(path, i))
+ expand := visit(field, t)
+ if expand && field.Type.Kind() == reflect.Struct {
+ // Only embedded (anonymous) structs extend the index path; named
+ // nested structs are recursed into with a fresh (nil) path.
+ var subpath []int
+ if field.Anonymous {
+ subpath = append(path, i)
+ }
+ walkFieldsImpl(field.Type, visit, subpath)
+ }
+ }
+}
+
+// NewParser constructs a parser from a list of destination structs
+//
+// Each dest must be a pointer to a struct; NewParser panics otherwise.
+// Nonzero field values already present in the structs are captured as
+// default values for the corresponding options.
+func NewParser(config Config, dests ...interface{}) (*Parser, error) {
+ // first pick a name for the command for use in the usage text
+ var name string
+ switch {
+ case config.Program != "":
+ name = config.Program
+ case len(os.Args) > 0:
+ name = filepath.Base(os.Args[0])
+ default:
+ name = "program"
+ }
+
+ // construct a parser
+ p := Parser{
+ cmd: &command{name: name},
+ config: config,
+ }
+
+ // make a list of roots
+ for _, dest := range dests {
+ p.roots = append(p.roots, reflect.ValueOf(dest))
+ }
+
+ // process each of the destination values
+ for i, dest := range dests {
+ t := reflect.TypeOf(dest)
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("%s is not a pointer (did you forget an ampersand?)", t))
+ }
+
+ cmd, err := cmdFromStruct(name, path{root: i}, t)
+ if err != nil {
+ return nil, err
+ }
+
+ // add nonzero field values as defaults
+ for _, spec := range cmd.specs {
+ if v := p.val(spec.dest); v.IsValid() && !isZero(v) {
+ if defaultVal, ok := v.Interface().(encoding.TextMarshaler); ok {
+ // prefer the type's own text representation for the usage string
+ str, err := defaultVal.MarshalText()
+ if err != nil {
+ return nil, fmt.Errorf("%v: error marshaling default value to string: %v", spec.dest, err)
+ }
+ spec.defaultVal = string(str)
+ } else {
+ spec.defaultVal = fmt.Sprintf("%v", v)
+ }
+ }
+ }
+
+ // merge this destination's options and subcommands into the root command
+ p.cmd.specs = append(p.cmd.specs, cmd.specs...)
+ p.cmd.subcommands = append(p.cmd.subcommands, cmd.subcommands...)
+
+ // pick up optional version/description strings from the destinations
+ if dest, ok := dest.(Versioned); ok {
+ p.version = dest.Version()
+ }
+ if dest, ok := dest.(Described); ok {
+ p.description = dest.Description()
+ }
+ }
+
+ return &p, nil
+}
+
+// cmdFromStruct builds the internal command description for a
+// pointer-to-struct type: one spec per exported non-subcommand field, plus a
+// recursively constructed child command for each field tagged
+// `arg:"subcommand"`. name is the name under which the command is invoked
+// and dest is the path to the struct within the root arguments value.
+func cmdFromStruct(name string, dest path, t reflect.Type) (*command, error) {
+	// commands can only be created from pointers to structs
+	if t.Kind() != reflect.Ptr {
+		return nil, fmt.Errorf("subcommands must be pointers to structs but %s is a %s",
+			dest, t.Kind())
+	}
+
+	t = t.Elem()
+	if t.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("subcommands must be pointers to structs but %s is a pointer to %s",
+			dest, t.Kind())
+	}
+
+	cmd := command{
+		name: name,
+		dest: dest,
+	}
+
+	// accumulate errors from every field so all problems are reported in one
+	// pass rather than only the first
+	var errs []string
+	walkFields(t, func(field reflect.StructField, t reflect.Type) bool {
+		// check for the ignore switch in the tag
+		tag := field.Tag.Get("arg")
+		if tag == "-" {
+			return false
+		}
+
+		// if this is an embedded struct then recurse into its fields, even if
+		// it is unexported, because exported fields on unexported embedded
+		// structs are still writable
+		if field.Anonymous && field.Type.Kind() == reflect.Struct {
+			return true
+		}
+
+		// ignore any other unexported field
+		if !isExported(field.Name) {
+			return false
+		}
+
+		// duplicate the entire path to avoid slice overwrites
+		subdest := dest.Child(field)
+		spec := spec{
+			dest:  subdest,
+			field: field,
+			long:  strings.ToLower(field.Name),
+		}
+
+		help, exists := field.Tag.Lookup("help")
+		if exists {
+			spec.help = help
+		}
+
+		defaultVal, hasDefault := field.Tag.Lookup("default")
+		if hasDefault {
+			spec.defaultVal = defaultVal
+		}
+
+		// Look at the tag: a comma-separated list of keys, each optionally
+		// carrying a value after a colon (e.g. "subcommand:get")
+		var isSubcommand bool // tracks whether this field is a subcommand
+		for _, key := range strings.Split(tag, ",") {
+			if key == "" {
+				continue
+			}
+			key = strings.TrimLeft(key, " ")
+			var value string
+			if pos := strings.Index(key, ":"); pos != -1 {
+				value = key[pos+1:]
+				key = key[:pos]
+			}
+
+			switch {
+			case strings.HasPrefix(key, "---"):
+				errs = append(errs, fmt.Sprintf("%s.%s: too many hyphens", t.Name(), field.Name))
+			case strings.HasPrefix(key, "--"):
+				spec.long = key[2:]
+			case strings.HasPrefix(key, "-"):
+				if len(key) != 2 {
+					errs = append(errs, fmt.Sprintf("%s.%s: short arguments must be one character only",
+						t.Name(), field.Name))
+					return false
+				}
+				spec.short = key[1:]
+			case key == "required":
+				if hasDefault {
+					errs = append(errs, fmt.Sprintf("%s.%s: 'required' cannot be used when a default value is specified",
+						t.Name(), field.Name))
+					return false
+				}
+				spec.required = true
+			case key == "positional":
+				spec.positional = true
+			case key == "separate":
+				spec.separate = true
+			case key == "help": // deprecated
+				spec.help = value
+			case key == "env":
+				// Use override name if provided
+				if value != "" {
+					spec.env = value
+				} else {
+					spec.env = strings.ToUpper(field.Name)
+				}
+			case key == "subcommand":
+				// decide on a name for the subcommand
+				cmdname := value
+				if cmdname == "" {
+					cmdname = strings.ToLower(field.Name)
+				}
+
+				// parse the subcommand recursively
+				subcmd, err := cmdFromStruct(cmdname, subdest, field.Type)
+				if err != nil {
+					errs = append(errs, err.Error())
+					return false
+				}
+
+				subcmd.parent = &cmd
+				subcmd.help = field.Tag.Get("help")
+
+				cmd.subcommands = append(cmd.subcommands, subcmd)
+				isSubcommand = true
+			default:
+				errs = append(errs, fmt.Sprintf("unrecognized tag '%s' on field %s", key, tag))
+				return false
+			}
+		}
+
+		// the placeholder is the all-caps name shown for the value in usage text
+		placeholder, hasPlaceholder := field.Tag.Lookup("placeholder")
+		if hasPlaceholder {
+			spec.placeholder = placeholder
+		} else if spec.long != "" {
+			spec.placeholder = strings.ToUpper(spec.long)
+		} else {
+			spec.placeholder = strings.ToUpper(spec.field.Name)
+		}
+
+		// Check whether this field is supported. It's good to do this here rather than
+		// wait until ParseValue because it means that a program with invalid argument
+		// fields will always fail regardless of whether the arguments it received
+		// exercised those fields.
+		if !isSubcommand {
+			cmd.specs = append(cmd.specs, &spec)
+
+			var err error
+			spec.cardinality, err = cardinalityOf(field.Type)
+			if err != nil {
+				errs = append(errs, fmt.Sprintf("%s.%s: %s fields are not supported",
+					t.Name(), field.Name, field.Type.String()))
+				return false
+			}
+			if spec.cardinality == multiple && hasDefault {
+				errs = append(errs, fmt.Sprintf("%s.%s: default values are not supported for slice or map fields",
+					t.Name(), field.Name))
+				return false
+			}
+		}
+
+		// if this was an embedded field then we already returned true up above
+		return false
+	})
+
+	if len(errs) > 0 {
+		return nil, errors.New(strings.Join(errs, "\n"))
+	}
+
+	// check that we don't have both positionals and subcommands
+	var hasPositional bool
+	for _, spec := range cmd.specs {
+		if spec.positional {
+			hasPositional = true
+		}
+	}
+	if hasPositional && len(cmd.subcommands) > 0 {
+		return nil, fmt.Errorf("%s cannot have both subcommands and positional arguments", dest)
+	}
+
+	return &cmd, nil
+}
+
+// Parse processes the given command line options, storing the results in the
+// fields of the structs from which NewParser was constructed.
+func (p *Parser) Parse(args []string) error {
+	err := p.process(args)
+	if err == nil {
+		return nil
+	}
+	// A help request always supersedes any other parse error, but only when
+	// it appears before the "--" terminator.
+	for _, a := range args {
+		switch a {
+		case "-h", "--help":
+			return ErrHelp
+		case "--":
+			return err
+		}
+	}
+	return err
+}
+
+// captureEnvVars assigns values from environment variables to each spec that
+// declares an env name and whose variable is set. Every spec that receives a
+// value is recorded in wasPresent so later stages neither overwrite it with a
+// default nor report it as missing. Multi-valued specs expect the variable to
+// contain a CSV-encoded list of values.
+func (p *Parser) captureEnvVars(specs []*spec, wasPresent map[*spec]bool) error {
+	for _, spec := range specs {
+		if spec.env == "" {
+			continue
+		}
+
+		value, found := os.LookupEnv(spec.env)
+		if !found {
+			continue
+		}
+
+		if spec.cardinality == multiple {
+			// expect a CSV string in an environment
+			// variable in the case of multiple values
+			var values []string
+			var err error
+			// an empty or whitespace-only variable yields zero values
+			if len(strings.TrimSpace(value)) > 0 {
+				values, err = csv.NewReader(strings.NewReader(value)).Read()
+				if err != nil {
+					return fmt.Errorf(
+						"error reading a CSV string from environment variable %s with multiple values: %v",
+						spec.env,
+						err,
+					)
+				}
+			}
+			if err = setSliceOrMap(p.val(spec.dest), values, !spec.separate); err != nil {
+				return fmt.Errorf(
+					"error processing environment variable %s with multiple values: %v",
+					spec.env,
+					err,
+				)
+			}
+		} else {
+			if err := scalar.ParseValue(p.val(spec.dest), value); err != nil {
+				return fmt.Errorf("error processing environment variable %s: %v", spec.env, err)
+			}
+		}
+		wasPresent[spec] = true
+	}
+
+	return nil
+}
+
+// process goes through arguments one-by-one, parses them, and assigns the
+// result to the underlying struct field. It expands subcommands as they are
+// encountered, captures environment variables, handles multi-value options
+// and positionals, then fills in defaults and checks required arguments.
+// A bare "--" switches to positional-only mode; -h/--help and --version
+// return ErrHelp and ErrVersion respectively.
+func (p *Parser) process(args []string) error {
+	// track the options we have seen
+	wasPresent := make(map[*spec]bool)
+
+	// union of specs for the chain of subcommands encountered so far
+	curCmd := p.cmd
+	p.lastCmd = curCmd
+
+	// make a copy of the specs because we will add to this list each time we expand a subcommand
+	specs := make([]*spec, len(curCmd.specs))
+	copy(specs, curCmd.specs)
+
+	// deal with environment vars
+	if !p.config.IgnoreEnv {
+		err := p.captureEnvVars(specs, wasPresent)
+		if err != nil {
+			return err
+		}
+	}
+
+	// process each string from the command line
+	var allpositional bool
+	var positionals []string
+
+	// must use explicit for loop, not range, because we manipulate i inside the loop
+	for i := 0; i < len(args); i++ {
+		arg := args[i]
+		if arg == "--" {
+			// every subsequent token is a positional, even if it starts with "-"
+			allpositional = true
+			continue
+		}
+
+		if !isFlag(arg) || allpositional {
+			// each subcommand can have either subcommands or positionals, but not both
+			if len(curCmd.subcommands) == 0 {
+				positionals = append(positionals, arg)
+				continue
+			}
+
+			// if we have a subcommand then make sure it is valid for the current context
+			subcmd := findSubcommand(curCmd.subcommands, arg)
+			if subcmd == nil {
+				return fmt.Errorf("invalid subcommand: %s", arg)
+			}
+
+			// instantiate the field to point to a new struct
+			v := p.val(subcmd.dest)
+			v.Set(reflect.New(v.Type().Elem())) // we already checked that all subcommands are struct pointers
+
+			// add the new options to the set of allowed options
+			specs = append(specs, subcmd.specs...)
+
+			// capture environment vars for these new options
+			if !p.config.IgnoreEnv {
+				err := p.captureEnvVars(subcmd.specs, wasPresent)
+				if err != nil {
+					return err
+				}
+			}
+
+			curCmd = subcmd
+			p.lastCmd = curCmd
+			continue
+		}
+
+		// check for special --help and --version flags
+		switch arg {
+		case "-h", "--help":
+			return ErrHelp
+		case "--version":
+			return ErrVersion
+		}
+
+		// check for an equals sign, as in "--foo=bar"
+		var value string
+		opt := strings.TrimLeft(arg, "-")
+		if pos := strings.Index(opt, "="); pos != -1 {
+			value = opt[pos+1:]
+			opt = opt[:pos]
+		}
+
+		// lookup the spec for this option (note that the "specs" slice changes as
+		// we expand subcommands so it is better not to use a map)
+		spec := findOption(specs, opt)
+		if spec == nil {
+			return fmt.Errorf("unknown argument %s", arg)
+		}
+		wasPresent[spec] = true
+
+		// deal with the case of multiple values
+		if spec.cardinality == multiple {
+			var values []string
+			if value == "" {
+				// consume tokens until the next flag or "--"; with "separate",
+				// each occurrence of the option takes exactly one value
+				for i+1 < len(args) && !isFlag(args[i+1]) && args[i+1] != "--" {
+					values = append(values, args[i+1])
+					i++
+					if spec.separate {
+						break
+					}
+				}
+			} else {
+				values = append(values, value)
+			}
+			err := setSliceOrMap(p.val(spec.dest), values, !spec.separate)
+			if err != nil {
+				return fmt.Errorf("error processing %s: %v", arg, err)
+			}
+			continue
+		}
+
+		// if it's a flag and it has no value then set the value to true
+		// use boolean because this takes account of TextUnmarshaler
+		if spec.cardinality == zero && value == "" {
+			value = "true"
+		}
+
+		// if we have something like "--foo" then the value is the next argument
+		if value == "" {
+			if i+1 == len(args) {
+				return fmt.Errorf("missing value for %s", arg)
+			}
+			// a following token that looks like a flag is only accepted as the
+			// value when it parses as a negative number for this field's type
+			if !nextIsNumeric(spec.field.Type, args[i+1]) && isFlag(args[i+1]) {
+				return fmt.Errorf("missing value for %s", arg)
+			}
+			value = args[i+1]
+			i++
+		}
+
+		err := scalar.ParseValue(p.val(spec.dest), value)
+		if err != nil {
+			return fmt.Errorf("error processing %s: %v", arg, err)
+		}
+	}
+
+	// process positionals
+	for _, spec := range specs {
+		if !spec.positional {
+			continue
+		}
+		if len(positionals) == 0 {
+			break
+		}
+		wasPresent[spec] = true
+		if spec.cardinality == multiple {
+			// a multi-valued positional consumes all remaining tokens
+			err := setSliceOrMap(p.val(spec.dest), positionals, true)
+			if err != nil {
+				return fmt.Errorf("error processing %s: %v", spec.field.Name, err)
+			}
+			positionals = nil
+		} else {
+			err := scalar.ParseValue(p.val(spec.dest), positionals[0])
+			if err != nil {
+				return fmt.Errorf("error processing %s: %v", spec.field.Name, err)
+			}
+			positionals = positionals[1:]
+		}
+	}
+	if len(positionals) > 0 {
+		return fmt.Errorf("too many positional arguments at '%s'", positionals[0])
+	}
+
+	// fill in defaults and check that all the required args were provided
+	for _, spec := range specs {
+		if wasPresent[spec] {
+			continue
+		}
+
+		name := strings.ToLower(spec.field.Name)
+		if spec.long != "" && !spec.positional {
+			name = "--" + spec.long
+		}
+
+		if spec.required {
+			msg := fmt.Sprintf("%s is required", name)
+			if spec.env != "" {
+				msg += " (or environment variable " + spec.env + ")"
+			}
+			return errors.New(msg)
+		}
+		if spec.defaultVal != "" {
+			err := scalar.ParseValue(p.val(spec.dest), spec.defaultVal)
+			if err != nil {
+				return fmt.Errorf("error processing default value for %s: %v", name, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// nextIsNumeric reports whether s can be parsed as a numeric value of type t,
+// unwrapping pointer types first. The caller uses this to accept a token that
+// begins with "-" as a negative number rather than rejecting it as a flag.
+func nextIsNumeric(t reflect.Type, s string) bool {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		// delegate to the scalar parser so the same rules apply as in real parsing
+		return scalar.ParseValue(reflect.New(t), s) == nil
+	default:
+		return false
+	}
+}
+
+// isFlag reports whether a token names an option, e.g. "-v" or "--user".
+// Bare "-" and "--" are not flags because they contain no option name.
+func isFlag(s string) bool {
+	if !strings.HasPrefix(s, "-") {
+		return false
+	}
+	return strings.TrimLeft(s, "-") != ""
+}
+
+// val returns the reflect.Value currently stored at the given path, starting
+// from the corresponding root struct and descending one field at a time. It
+// returns the zero Value if a nil pointer is encountered along the way.
+func (p *Parser) val(dest path) reflect.Value {
+	cur := p.roots[dest.root]
+	for _, f := range dest.fields {
+		// dereference intermediate pointers, bailing out on nil
+		if cur.Kind() == reflect.Ptr {
+			if cur.IsNil() {
+				return reflect.Value{}
+			}
+			cur = cur.Elem()
+		}
+		cur = cur.FieldByIndex(f.Index)
+	}
+	return cur
+}
+
+// findOption returns the spec whose long or short name equals the given name,
+// skipping positional specs. It returns nil when nothing matches.
+func findOption(specs []*spec, name string) *spec {
+	for _, s := range specs {
+		if s.positional {
+			// positionals are matched by position on the command line, not by name
+			continue
+		}
+		if name == s.long || name == s.short {
+			return s
+		}
+	}
+	return nil
+}
+
+// findSubcommand returns the subcommand with the given name, or nil when none
+// of the candidates match.
+func findSubcommand(cmds []*command, name string) *command {
+	for i := range cmds {
+		if cmds[i].name == name {
+			return cmds[i]
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/alexflint/go-arg/reflect.go b/vendor/github.com/alexflint/go-arg/reflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..cd80be7ce5b881ba08e575da29df02490370d615
--- /dev/null
+++ b/vendor/github.com/alexflint/go-arg/reflect.go
@@ -0,0 +1,107 @@
+package arg
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+
+ scalar "github.com/alexflint/go-scalar"
+)
+
+// textUnmarshalerType is the reflect.Type of the encoding.TextUnmarshaler
+// interface, used to detect types that supply their own string parsing.
+var textUnmarshalerType = reflect.TypeOf([]encoding.TextUnmarshaler{}).Elem()
+
+// cardinality tracks how many command line tokens a given spec consumes:
+//   - zero: a boolean flag that does not expect any value
+//   - one: an ordinary option parsed from a single token
+//   - multiple: a slice or map that can accept zero or more tokens
+//   - unsupported: a type that cannot be parsed at all
+type cardinality int
+
+const (
+	zero cardinality = iota
+	one
+	multiple
+	unsupported
+)
+
+// String returns a human-readable name for the cardinality.
+func (k cardinality) String() string {
+	names := []string{"zero", "one", "multiple", "unsupported"}
+	if k < 0 || int(k) >= len(names) {
+		return fmt.Sprintf("unknown(%d)", int(k))
+	}
+	return names[k]
+}
+
+// cardinalityOf returns the number of command line tokens that a value of
+// type t consumes when parsed (zero for booleans, one for scalars, multiple
+// for slices and maps), or unsupported plus an error if t cannot be parsed.
+func cardinalityOf(t reflect.Type) (cardinality, error) {
+	if scalar.CanParse(t) {
+		if isBoolean(t) {
+			return zero, nil
+		}
+		return one, nil
+	}
+
+	// look inside pointer types
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	// look inside slice and map types
+	switch t.Kind() {
+	case reflect.Slice:
+		if !scalar.CanParse(t.Elem()) {
+			return unsupported, fmt.Errorf("cannot parse into %v because %v not supported", t, t.Elem())
+		}
+		return multiple, nil
+	case reflect.Map:
+		if !scalar.CanParse(t.Key()) {
+			// report the key type here, not the element type
+			return unsupported, fmt.Errorf("cannot parse into %v because key type %v not supported", t, t.Key())
+		}
+		if !scalar.CanParse(t.Elem()) {
+			return unsupported, fmt.Errorf("cannot parse into %v because value type %v not supported", t, t.Elem())
+		}
+		return multiple, nil
+	default:
+		return unsupported, fmt.Errorf("cannot parse into %v", t)
+	}
+}
+
+// isBoolean reports whether t will be treated as a boolean flag (one that
+// takes no value on the command line). Types implementing
+// encoding.TextUnmarshaler are never treated as booleans, even *bool.
+func isBoolean(t reflect.Type) bool {
+	if t.Implements(textUnmarshalerType) {
+		return false
+	}
+	if t.Kind() == reflect.Bool {
+		return true
+	}
+	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Bool
+}
+
+// isExported reports whether the given struct field name is exported, i.e.
+// begins with an upper-case letter.
+func isExported(field string) bool {
+	// DecodeRuneInString yields RuneError for "" or invalid UTF-8, which is
+	// neither upper-case nor a letter, so those inputs report false.
+	first, _ := utf8.DecodeRuneInString(field)
+	return unicode.IsUpper(first) && unicode.IsLetter(first)
+}
+
+// isZero reports whether v holds the zero value for its type. Slices and
+// maps count as zero only when nil; values of incomparable types always
+// report false.
+func isZero(v reflect.Value) bool {
+	t := v.Type()
+	switch t.Kind() {
+	case reflect.Slice, reflect.Map:
+		return v.IsNil()
+	}
+	if !t.Comparable() {
+		return false
+	}
+	return v.Interface() == reflect.Zero(t).Interface()
+}
diff --git a/vendor/github.com/alexflint/go-arg/sequence.go b/vendor/github.com/alexflint/go-arg/sequence.go
new file mode 100644
index 0000000000000000000000000000000000000000..35a3614ecb9dad1df572430f5dfd80959ccb4fce
--- /dev/null
+++ b/vendor/github.com/alexflint/go-arg/sequence.go
@@ -0,0 +1,123 @@
+package arg
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ scalar "github.com/alexflint/go-scalar"
+)
+
+// setSliceOrMap parses a sequence of strings into a slice or map. When
+// overwrite is true, any values already present in the destination are
+// removed first.
+func setSliceOrMap(dest reflect.Value, values []string, overwrite bool) error {
+	if !dest.CanSet() {
+		return fmt.Errorf("field is not writable")
+	}
+
+	// unwrap one level of pointer indirection
+	t := dest.Type()
+	if t.Kind() == reflect.Ptr {
+		dest, t = dest.Elem(), t.Elem()
+	}
+
+	switch t.Kind() {
+	case reflect.Map:
+		return setMap(dest, values, overwrite)
+	case reflect.Slice:
+		return setSlice(dest, values, overwrite)
+	}
+	return fmt.Errorf("setSliceOrMap cannot insert values into a %v", t)
+}
+
+// setSlice parses a sequence of strings and appends them to a slice. If clear
+// is true then any values already in the slice are removed first (the backing
+// array is reused). Pointer element types that do not implement
+// encoding.TextUnmarshaler get a fresh allocation per entry.
+func setSlice(dest reflect.Value, values []string, clear bool) error {
+	var ptr bool
+	elem := dest.Type().Elem()
+	if elem.Kind() == reflect.Ptr && !elem.Implements(textUnmarshalerType) {
+		ptr = true
+		elem = elem.Elem()
+	}
+
+	// clear the slice in case default values exist
+	if clear && !dest.IsNil() {
+		dest.SetLen(0)
+	}
+
+	// parse the values one-by-one
+	for _, s := range values {
+		v := reflect.New(elem)
+		if err := scalar.ParseValue(v.Elem(), s); err != nil {
+			return err
+		}
+		if !ptr {
+			v = v.Elem()
+		}
+		dest.Set(reflect.Append(dest, v))
+	}
+	return nil
+}
+
+// setMap parses a sequence of name=value strings and inserts them into a map.
+// If clear is true then any entries already in the map are removed first.
+// Pointer key and value types that do not implement encoding.TextUnmarshaler
+// get a fresh allocation per entry.
+func setMap(dest reflect.Value, values []string, clear bool) error {
+	// determine the key and value type
+	var keyIsPtr bool
+	keyType := dest.Type().Key()
+	if keyType.Kind() == reflect.Ptr && !keyType.Implements(textUnmarshalerType) {
+		keyIsPtr = true
+		keyType = keyType.Elem()
+	}
+
+	var valIsPtr bool
+	valType := dest.Type().Elem()
+	if valType.Kind() == reflect.Ptr && !valType.Implements(textUnmarshalerType) {
+		valIsPtr = true
+		valType = valType.Elem()
+	}
+
+	// clear the map in case default values exist
+	if clear && !dest.IsNil() {
+		for _, k := range dest.MapKeys() {
+			dest.SetMapIndex(k, reflect.Value{})
+		}
+	}
+
+	// allocate the map if it is not allocated
+	if dest.IsNil() {
+		dest.Set(reflect.MakeMap(dest.Type()))
+	}
+
+	// parse the values one-by-one
+	for _, s := range values {
+		// split at the first equals sign
+		pos := strings.Index(s, "=")
+		if pos == -1 {
+			return fmt.Errorf("cannot parse %q into a map, expected format key=value", s)
+		}
+
+		// parse the key
+		k := reflect.New(keyType)
+		if err := scalar.ParseValue(k.Elem(), s[:pos]); err != nil {
+			return err
+		}
+		if !keyIsPtr {
+			k = k.Elem()
+		}
+
+		// parse the value
+		v := reflect.New(valType)
+		if err := scalar.ParseValue(v.Elem(), s[pos+1:]); err != nil {
+			return err
+		}
+		if !valIsPtr {
+			v = v.Elem()
+		}
+
+		// add it to the map
+		dest.SetMapIndex(k, v)
+	}
+	return nil
+}
diff --git a/vendor/github.com/alexflint/go-arg/subcommand.go b/vendor/github.com/alexflint/go-arg/subcommand.go
new file mode 100644
index 0000000000000000000000000000000000000000..dff732c0d00189b04dd461a37166b9a97dec67f5
--- /dev/null
+++ b/vendor/github.com/alexflint/go-arg/subcommand.go
@@ -0,0 +1,37 @@
+package arg
+
+// Subcommand returns the user struct for the subcommand selected by the
+// command line arguments most recently processed by the parser. The return
+// value is always a pointer to a struct. It returns nil when no subcommand
+// was specified, or when no command line has been processed yet.
+func (p *Parser) Subcommand() interface{} {
+	cmd := p.lastCmd
+	if cmd == nil || cmd.parent == nil {
+		// either nothing has been parsed yet, or the root command was selected
+		return nil
+	}
+	return p.val(cmd.dest).Interface()
+}
+
+// SubcommandNames returns the sequence of subcommands specified by the
+// user. If no subcommands were given then it returns an empty slice.
+func (p *Parser) SubcommandNames() []string {
+ if p.lastCmd == nil {
+ return nil
+ }
+
+ // make a list of ancestor commands
+ var ancestors []string
+ cur := p.lastCmd
+ for cur.parent != nil { // we want to exclude the root
+ ancestors = append(ancestors, cur.name)
+ cur = cur.parent
+ }
+
+ // reverse the list
+ out := make([]string, len(ancestors))
+ for i := 0; i < len(ancestors); i++ {
+ out[i] = ancestors[len(ancestors)-i-1]
+ }
+ return out
+}
diff --git a/vendor/github.com/alexflint/go-arg/usage.go b/vendor/github.com/alexflint/go-arg/usage.go
new file mode 100644
index 0000000000000000000000000000000000000000..e93681170cba141c8f19e0e767ca9b234c1d7401
--- /dev/null
+++ b/vendor/github.com/alexflint/go-arg/usage.go
@@ -0,0 +1,335 @@
+package arg
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// colWidth is the width, in characters, of the left-hand column of the help
+// output (the column that holds option names).
+const colWidth = 25
+
+// Output destinations and process-exit hook; declared as variables to allow
+// monkey patching in tests.
+var (
+	stdout io.Writer = os.Stdout
+	stderr io.Writer = os.Stderr
+	osExit = os.Exit
+)
+
+// Fail prints usage information for the top-level command to stderr, writes
+// the given error message, and exits the process with non-zero status.
+func (p *Parser) Fail(msg string) {
+	p.failWithSubcommand(msg, p.cmd)
+}
+
+// FailSubcommand prints usage information for the named subcommand to stderr,
+// writes the given error message, then exits with non-zero status. Pass a
+// single name for a top-level subcommand, or a sequence of names (outermost
+// first) for one nested deeper in the tree. It returns an error only when the
+// subcommand path cannot be resolved; otherwise it does not return.
+func (p *Parser) FailSubcommand(msg string, subcommand ...string) error {
+	cmd, err := p.lookupCommand(subcommand...)
+	if err == nil {
+		p.failWithSubcommand(msg, cmd)
+	}
+	return err
+}
+
+// failWithSubcommand prints usage information for the given subcommand to
+// stderr, writes the error message, and terminates the process via osExit
+// with status -1 (osExit is a variable so tests can intercept the exit).
+func (p *Parser) failWithSubcommand(msg string, cmd *command) {
+	p.writeUsageForSubcommand(stderr, cmd)
+	fmt.Fprintln(stderr, "error:", msg)
+	osExit(-1)
+}
+
+// WriteUsage writes a one-line usage synopsis to w. If a command line has
+// already been parsed, usage is written for the most recently selected
+// subcommand; otherwise it is written for the top-level command.
+func (p *Parser) WriteUsage(w io.Writer) {
+	if p.lastCmd != nil {
+		p.writeUsageForSubcommand(w, p.lastCmd)
+		return
+	}
+	p.writeUsageForSubcommand(w, p.cmd)
+}
+
+// WriteUsageForSubcommand writes a one-line usage synopsis for the named
+// subcommand. Pass a single name for a top-level subcommand, or a sequence of
+// names (outermost first) for one nested deeper in the tree. It returns an
+// error when the subcommand path cannot be resolved.
+func (p *Parser) WriteUsageForSubcommand(w io.Writer, subcommand ...string) error {
+	cmd, err := p.lookupCommand(subcommand...)
+	if err == nil {
+		p.writeUsageForSubcommand(w, cmd)
+	}
+	return err
+}
+
+// writeUsageForSubcommand writes usage information for the given subcommand
+func (p *Parser) writeUsageForSubcommand(w io.Writer, cmd *command) {
+ var positionals, longOptions, shortOptions []*spec
+ for _, spec := range cmd.specs {
+ switch {
+ case spec.positional:
+ positionals = append(positionals, spec)
+ case spec.long != "":
+ longOptions = append(longOptions, spec)
+ case spec.short != "":
+ shortOptions = append(shortOptions, spec)
+ }
+ }
+
+ if p.version != "" {
+ fmt.Fprintln(w, p.version)
+ }
+
+ // make a list of ancestor commands so that we print with full context
+ var ancestors []string
+ ancestor := cmd
+ for ancestor != nil {
+ ancestors = append(ancestors, ancestor.name)
+ ancestor = ancestor.parent
+ }
+
+ // print the beginning of the usage string
+ fmt.Fprint(w, "Usage:")
+ for i := len(ancestors) - 1; i >= 0; i-- {
+ fmt.Fprint(w, " "+ancestors[i])
+ }
+
+ // write the option component of the usage message
+ for _, spec := range shortOptions {
+ // prefix with a space
+ fmt.Fprint(w, " ")
+ if !spec.required {
+ fmt.Fprint(w, "[")
+ }
+ fmt.Fprint(w, synopsis(spec, "-"+spec.short))
+ if !spec.required {
+ fmt.Fprint(w, "]")
+ }
+ }
+
+ for _, spec := range longOptions {
+ // prefix with a space
+ fmt.Fprint(w, " ")
+ if !spec.required {
+ fmt.Fprint(w, "[")
+ }
+ fmt.Fprint(w, synopsis(spec, "--"+spec.long))
+ if !spec.required {
+ fmt.Fprint(w, "]")
+ }
+ }
+
+ // When we parse positionals, we check that:
+ // 1. required positionals come before non-required positionals
+ // 2. there is at most one multiple-value positional
+ // 3. if there is a multiple-value positional then it comes after all other positionals
+ // Here we merely print the usage string, so we do not explicitly re-enforce those rules
+
+ // write the positionals in following form:
+ // REQUIRED1 REQUIRED2
+ // REQUIRED1 REQUIRED2 [OPTIONAL1 [OPTIONAL2]]
+ // REQUIRED1 REQUIRED2 REPEATED [REPEATED ...]
+ // REQUIRED1 REQUIRED2 [REPEATEDOPTIONAL [REPEATEDOPTIONAL ...]]
+ // REQUIRED1 REQUIRED2 [OPTIONAL1 [REPEATEDOPTIONAL [REPEATEDOPTIONAL ...]]]
+ var closeBrackets int
+ for _, spec := range positionals {
+ fmt.Fprint(w, " ")
+ if !spec.required {
+ fmt.Fprint(w, "[")
+ closeBrackets += 1
+ }
+ if spec.cardinality == multiple {
+ fmt.Fprintf(w, "%s [%s ...]", spec.placeholder, spec.placeholder)
+ } else {
+ fmt.Fprint(w, spec.placeholder)
+ }
+ }
+ fmt.Fprint(w, strings.Repeat("]", closeBrackets))
+
+ // if the program supports subcommands, give a hint to the user about their existence
+ if len(cmd.subcommands) > 0 {
+ fmt.Fprint(w, " ]{0,})> ]{0,})>([\d]{0,}\.)(.*)((
([\w\W\s\d][^<>]{0,})|[\s]{0,}))<\/a><\/TD>]{0,})>([\w\W\s\d][^<>]{0,})<\/TD> ]{0,})>([\w\W\s\d][^<>]{0,})<\/TD><\/TR>/is
+
+ 0: 43.Word Processor
(N-1286)Lega lstaff.com CA - Statewide
+ 1: BGCOLOR='#DBE9E9'
+ 2: align=left valign=top
+ 3: 43.
+ 4: Word Processor43.Word Processor
(N-1286)Lega lstaff.com CA - Statewide
(N-1286)
+ 5:
+ 6:
+ 7: (?
(?
is valid, but isn't valid as the "dir" attr
+// is mandatory
+func (p *Policy) addDefaultElementsWithoutAttrs() {
+ p.init()
+
+ p.setOfElementsAllowedWithoutAttrs["abbr"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["acronym"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["address"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["article"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["aside"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["audio"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["b"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["bdi"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["blockquote"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["body"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["br"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["button"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["canvas"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["caption"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["center"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["cite"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["code"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["col"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["colgroup"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["datalist"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["dd"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["del"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["details"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["dfn"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["div"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["dl"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["dt"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["em"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["fieldset"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["figcaption"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["figure"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["footer"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["h1"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["h2"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["h3"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["h4"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["h5"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["h6"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["head"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["header"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["hgroup"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["hr"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["html"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["i"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["ins"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["kbd"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["li"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["mark"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["marquee"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["nav"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["ol"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["optgroup"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["option"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["p"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["picture"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["pre"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["q"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["rp"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["rt"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["ruby"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["s"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["samp"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["script"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["section"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["select"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["small"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["span"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["strike"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["strong"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["style"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["sub"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["summary"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["sup"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["svg"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["table"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["tbody"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["td"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["textarea"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["tfoot"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["th"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["thead"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["title"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["time"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["tr"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["tt"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["u"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["ul"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["var"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["video"] = struct{}{}
+ p.setOfElementsAllowedWithoutAttrs["wbr"] = struct{}{}
+
+}
+
+// addDefaultSkipElementContent adds the HTML elements that we should skip
+// rendering the character content of, if the element itself is not allowed.
+// This is all character data that the end user would not normally see, e.g.
+// script bodies and style sheets.
+func (p *Policy) addDefaultSkipElementContent() {
+	p.init()
+
+	for _, el := range []string{
+		"frame", "frameset", "iframe", "noembed", "noframes", "noscript",
+		"nostyle", "object", "script", "style", "title",
+	} {
+		p.setOfElementsToSkipContent[el] = struct{}{}
+	}
+}
diff --git a/vendor/github.com/microcosm-cc/bluemonday/sanitize.go b/vendor/github.com/microcosm-cc/bluemonday/sanitize.go
new file mode 100644
index 0000000000000000000000000000000000000000..9121aefb005ea321afc11164d4f68a621e49202b
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/sanitize.go
@@ -0,0 +1,1116 @@
+// Copyright (c) 2014, David Kitchen