// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package html implements an HTML5-compliant tokenizer and parser.
INCOMPLETE.

Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
caller's responsibility to ensure that r provides UTF-8 encoded HTML.

	z := html.NewTokenizer(r)

Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
which parses the next token and returns its type, or an error:

	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			// ...
			return ...
		}
		// Process the current token.
	}

There are two APIs for retrieving the current token. The high-level API is to
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
allow optionally calling Raw after Next but before Token, Text, TagName, or
TagAttr. In EBNF notation, the valid call sequence per token is:

	Next {Raw} [ Token | Text | TagName {TagAttr} ]

Token returns an independent data structure that completely describes a token.
Entities (such as "&lt;") are unescaped, tag names and attribute keys are
lower-cased, and attributes are collected into a []Attribute. For example:

	for {
		if z.Next() == html.ErrorToken {
			// Returning io.EOF indicates success.
			return z.Err()
		}
		emitToken(z.Token())
	}

The low-level API performs fewer allocations and copies, but the contents of
the []byte values returned by Text, TagName and TagAttr may change on the next
call to Next. For example, to extract an HTML page's anchor text:

	depth := 0
	for {
		tt := z.Next()
		switch tt {
		case ErrorToken:
			return z.Err()
		case TextToken:
			if depth > 0 {
				// emitBytes should copy the []byte it receives,
				// if it doesn't process it immediately.
				emitBytes(z.Text())
			}
		case StartTagToken, EndTagToken:
			tn, _ := z.TagName()
			if len(tn) == 1 && tn[0] == 'a' {
				if tt == StartTagToken {
					depth++
				} else {
					depth--
				}
			}
		}
	}

Parsing is done by calling Parse with an io.Reader, which returns the root of
the parse tree (the document element) as a *Node. It is the caller's
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
example, to process each anchor node in depth-first order:

	doc, err := html.Parse(r)
	if err != nil {
		// ...
	}
	var f func(*html.Node)
	f = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			// Do something with n...
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			f(c)
		}
	}
	f(doc)

The relevant specifications include:
http://www.whatwg.org/specs/web-apps/current-work/multipage/syntax.html and
http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html
*/
package html

// The tokenization algorithm implemented by this package is not a line-by-line
// transliteration of the relatively verbose state-machine in the WHATWG
// specification. A more direct approach is used instead, where the program
// counter implies the state, such as whether it is tokenizing a tag or a text
// node. Specification compliance is verified by checking expected and actual
// outputs over a test suite rather than aiming for algorithmic fidelity.

// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
// TODO(nigeltao): How does parsing interact with a JavaScript engine?