Skip to content

Commit 2faa328

Browse files
committed
Make enums const enums so that TypeScript can inline their values.
Prevents problems with the Closure Compiler running after TypeScript.
1 parent 8c718d2 commit 2faa328

File tree

6 files changed

+122
-124
lines changed

6 files changed

+122
-124
lines changed

src/shady-css/common.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const matcher = {
2323
/**
2424
* An enumeration of Node types.
2525
*/
26-
export enum nodeType {
26+
export const enum nodeType {
2727
stylesheet = 'stylesheet',
2828
comment = 'comment',
2929
atRule = 'atRule',

src/shady-css/parser.ts

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212
import {AtRule, Comment, Declaration, Discarded, Rule, Rulelist, Ruleset, Stylesheet} from './common';
1313
import {NodeFactory} from './node-factory';
14-
import {Token} from './token';
14+
import {TokenType} from './token';
1515
import {Tokenizer} from './tokenizer';
1616

1717
/**
@@ -81,20 +81,20 @@ class Parser {
8181
if (token === null) {
8282
return null;
8383
}
84-
if (token.is(Token.type.whitespace)) {
84+
if (token.is(TokenType.whitespace)) {
8585
tokenizer.advance();
8686
return null;
8787

88-
} else if (token.is(Token.type.comment)) {
88+
} else if (token.is(TokenType.comment)) {
8989
return this.parseComment(tokenizer);
9090

91-
} else if (token.is(Token.type.word)) {
91+
} else if (token.is(TokenType.word)) {
9292
return this.parseDeclarationOrRuleset(tokenizer);
9393

94-
} else if (token.is(Token.type.propertyBoundary)) {
94+
} else if (token.is(TokenType.propertyBoundary)) {
9595
return this.parseUnknown(tokenizer);
9696

97-
} else if (token.is(Token.type.at)) {
97+
} else if (token.is(TokenType.at)) {
9898
return this.parseAtRule(tokenizer);
9999

100100
} else {
@@ -130,7 +130,7 @@ class Parser {
130130
}
131131

132132
while (tokenizer.currentToken &&
133-
tokenizer.currentToken.is(Token.type.boundary)) {
133+
tokenizer.currentToken.is(TokenType.boundary)) {
134134
end = tokenizer.advance();
135135
}
136136

@@ -155,27 +155,27 @@ class Parser {
155155
const start = tokenizer.currentToken.start;
156156

157157
while (tokenizer.currentToken) {
158-
if (tokenizer.currentToken.is(Token.type.whitespace)) {
158+
if (tokenizer.currentToken.is(TokenType.whitespace)) {
159159
tokenizer.advance();
160-
} else if (!name && tokenizer.currentToken.is(Token.type.at)) {
160+
} else if (!name && tokenizer.currentToken.is(TokenType.at)) {
161161
// Discard the @:
162162
tokenizer.advance();
163163
const start = tokenizer.currentToken;
164164
let end;
165165

166166
while (tokenizer.currentToken &&
167-
tokenizer.currentToken.is(Token.type.word)) {
167+
tokenizer.currentToken.is(TokenType.word)) {
168168
end = tokenizer.advance();
169169
}
170170
nameRange = tokenizer.getRange(start, end);
171171
name = tokenizer.cssText.slice(nameRange.start, nameRange.end);
172-
} else if (tokenizer.currentToken.is(Token.type.openBrace)) {
172+
} else if (tokenizer.currentToken.is(TokenType.openBrace)) {
173173
rulelist = this.parseRulelist(tokenizer);
174174
break;
175-
} else if (tokenizer.currentToken.is(Token.type.semicolon)) {
175+
} else if (tokenizer.currentToken.is(TokenType.semicolon)) {
176176
tokenizer.advance();
177177
break;
178-
} else if (tokenizer.currentToken.is(Token.type.closeBrace)) {
178+
} else if (tokenizer.currentToken.is(TokenType.closeBrace)) {
179179
break;
180180
} else {
181181
if (parametersStart == null) {
@@ -217,7 +217,7 @@ class Parser {
217217
tokenizer.advance();
218218

219219
while (tokenizer.currentToken) {
220-
if (tokenizer.currentToken.is(Token.type.closeBrace)) {
220+
if (tokenizer.currentToken.is(TokenType.closeBrace)) {
221221
endToken = tokenizer.currentToken;
222222
tokenizer.advance();
223223
break;
@@ -250,20 +250,20 @@ class Parser {
250250
// property boundary.. though that may be impossible.
251251

252252
while (tokenizer.currentToken) {
253-
if (tokenizer.currentToken.is(Token.type.whitespace)) {
253+
if (tokenizer.currentToken.is(TokenType.whitespace)) {
254254
tokenizer.advance();
255-
} else if (tokenizer.currentToken.is(Token.type.openParenthesis)) {
255+
} else if (tokenizer.currentToken.is(TokenType.openParenthesis)) {
256256
// skip until close paren
257257
while (tokenizer.currentToken &&
258-
!tokenizer.currentToken.is(Token.type.closeParenthesis)) {
258+
!tokenizer.currentToken.is(TokenType.closeParenthesis)) {
259259
tokenizer.advance();
260260
}
261261
} else if (
262-
tokenizer.currentToken.is(Token.type.openBrace) ||
263-
tokenizer.currentToken.is(Token.type.propertyBoundary)) {
262+
tokenizer.currentToken.is(TokenType.openBrace) ||
263+
tokenizer.currentToken.is(TokenType.propertyBoundary)) {
264264
break;
265265
} else {
266-
if (tokenizer.currentToken.is(Token.type.colon)) {
266+
if (tokenizer.currentToken.is(TokenType.colon)) {
267267
colon = tokenizer.currentToken;
268268
}
269269

@@ -282,7 +282,7 @@ class Parser {
282282
}
283283

284284
// A ruleset never contains or ends with a semi-colon.
285-
if (tokenizer.currentToken.is(Token.type.propertyBoundary)) {
285+
if (tokenizer.currentToken.is(TokenType.propertyBoundary)) {
286286
const nameRange =
287287
tokenizer.getRange(ruleStart!, colon ? colon.previous : ruleEnd);
288288
const declarationName =
@@ -298,7 +298,7 @@ class Parser {
298298
this.nodeFactory.expression(expressionValue, expressionRange);
299299
}
300300

301-
if (tokenizer.currentToken.is(Token.type.semicolon)) {
301+
if (tokenizer.currentToken.is(TokenType.semicolon)) {
302302
tokenizer.advance();
303303
}
304304

@@ -313,7 +313,7 @@ class Parser {
313313
} else if (colon && colon === ruleEnd) {
314314
const rulelist = this.parseRulelist(tokenizer);
315315

316-
if (tokenizer.currentToken.is(Token.type.semicolon)) {
316+
if (tokenizer.currentToken.is(TokenType.semicolon)) {
317317
tokenizer.advance();
318318
}
319319

src/shady-css/token.ts

Lines changed: 25 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -12,40 +12,38 @@
1212
/**
1313
* An enumeration of Token types.
1414
*/
15-
export enum TokenType {
15+
export const enum TokenType {
1616
none = 0,
17-
whitespace = (2 ** 0),
18-
string = (2 ** 1),
19-
comment = (2 ** 2),
20-
word = (2 ** 3),
21-
boundary = (2 ** 4),
22-
propertyBoundary = (2 ** 5),
17+
whitespace = (1 << 0),
18+
string = (1 << 1),
19+
comment = (1 << 2),
20+
word = (1 << 3),
21+
boundary = (1 << 4),
22+
propertyBoundary = (1 << 5),
2323
// Special cases for boundary:
24-
openParenthesis = (2 ** 6) | TokenType.boundary,
25-
closeParenthesis = (2 ** 7) | TokenType.boundary,
26-
at = (2 ** 8) | TokenType.boundary,
27-
openBrace = (2 ** 9) | TokenType.boundary,
24+
openParenthesis = (1 << 6) | TokenType.boundary,
25+
closeParenthesis = (1 << 7) | TokenType.boundary,
26+
at = (1 << 8) | TokenType.boundary,
27+
openBrace = (1 << 9) | TokenType.boundary,
2828
// [};] are property boundaries:
29-
closeBrace = (2 ** 10) | TokenType.propertyBoundary | TokenType.boundary,
30-
semicolon = (2 ** 11) | TokenType.propertyBoundary | TokenType.boundary,
29+
closeBrace = (1 << 10) | TokenType.propertyBoundary | TokenType.boundary,
30+
semicolon = (1 << 11) | TokenType.propertyBoundary | TokenType.boundary,
3131
// : is a chimaeric abomination:
3232
// foo:bar{}
3333
// foo:bar;
34-
colon = (2 ** 12) | TokenType.boundary | TokenType.word,
34+
colon = (1 << 12) | TokenType.boundary | TokenType.word,
3535

3636
// TODO: are these two boundaries? I mean, sometimes they are I guess? Or
3737
// maybe they shouldn't exist in the boundaryTokenTypes map.
38-
hyphen = (2 ** 13),
39-
underscore = (2 ** 14)
38+
hyphen = (1 << 13),
39+
underscore = (1 << 14)
4040
}
4141

4242

4343
/**
4444
* Class that describes individual tokens as produced by the Tokenizer.
4545
*/
4646
class Token {
47-
static type = TokenType;
48-
4947
readonly type: TokenType;
5048
readonly start: number;
5149
readonly end: number;
@@ -84,15 +82,15 @@ class Token {
8482
* A mapping of boundary token text to their corresponding types.
8583
*/
8684
const boundaryTokenTypes: {[boundaryText: string]: TokenType | undefined} = {
87-
'(': Token.type.openParenthesis,
88-
')': Token.type.closeParenthesis,
89-
':': Token.type.colon,
90-
'@': Token.type.at,
91-
'{': Token.type.openBrace,
92-
'}': Token.type.closeBrace,
93-
';': Token.type.semicolon,
94-
'-': Token.type.hyphen,
95-
'_': Token.type.underscore
85+
'(': TokenType.openParenthesis,
86+
')': TokenType.closeParenthesis,
87+
':': TokenType.colon,
88+
'@': TokenType.at,
89+
'{': TokenType.openBrace,
90+
'}': TokenType.closeBrace,
91+
';': TokenType.semicolon,
92+
'-': TokenType.hyphen,
93+
'_': TokenType.underscore
9694
};
9795

9896
export {Token, boundaryTokenTypes};

src/shady-css/tokenizer.ts

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
*/
1111

1212
import {matcher, Range} from './common';
13-
import {boundaryTokenTypes, Token} from './token';
13+
import {boundaryTokenTypes, Token, TokenType} from './token';
1414

1515
/**
1616
* Class that implements tokenization of significant lexical features of the
@@ -23,7 +23,7 @@ class Tokenizer {
2323
* Tracks the position of the tokenizer in the source string.
2424
* Also the default head of the Token linked list.
2525
*/
26-
private cursorToken_ = new Token(Token.type.none, 0, 0);
26+
private cursorToken_ = new Token(TokenType.none, 0, 0);
2727

2828
/**
2929
* Holds a reference to a Token that is "next" in the source string, often
@@ -181,7 +181,7 @@ class Tokenizer {
181181
}
182182
}
183183

184-
return new Token(Token.type.string, start, offset);
184+
return new Token(TokenType.string, start, offset);
185185
}
186186

187187
/**
@@ -200,7 +200,7 @@ class Tokenizer {
200200
offset++;
201201
}
202202

203-
return new Token(Token.type.word, start, offset);
203+
return new Token(TokenType.word, start, offset);
204204
}
205205

206206
/**
@@ -220,7 +220,7 @@ class Tokenizer {
220220
offset = matcher.whitespaceGreedy.lastIndex;
221221
}
222222

223-
return new Token(Token.type.whitespace, start, offset);
223+
return new Token(TokenType.whitespace, start, offset);
224224
}
225225

226226
/**
@@ -242,7 +242,7 @@ class Tokenizer {
242242
offset = matcher.commentGreedy.lastIndex;
243243
}
244244

245-
return new Token(Token.type.comment, start, offset);
245+
return new Token(TokenType.comment, start, offset);
246246
}
247247

248248
/**
@@ -255,7 +255,7 @@ class Tokenizer {
255255
tokenizeBoundary(offset: number): Token {
256256
// TODO(cdata): Evaluate if this is faster than a switch statement:
257257
const type =
258-
boundaryTokenTypes[this.cssText[offset]] || Token.type.boundary;
258+
boundaryTokenTypes[this.cssText[offset]] || TokenType.boundary;
259259

260260
return new Token(type, offset, offset + 1);
261261
}

src/test/helpers.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ function linkedTokens(tokens: Token[]) {
4242
}
4343

4444
return r;
45-
}, new Token(Token.type.none, 0, 0));
45+
}, new Token(TokenType.none, 0, 0));
4646

4747
return tokens;
4848
}

0 commit comments

Comments
 (0)