@@ -109,6 +109,7 @@ def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                 token.pop()
 
     if not ignoreErrorOrder and not ignoreErrors:
+        expectedTokens = concatenateCharacterTokens(expectedTokens)
         return expectedTokens == receivedTokens
     else:
         # Sort the tokens into two groups; non-parse errors and parse errors
@@ -121,6 +122,7 @@ def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                 else:
                     if not ignoreErrors:
                         tokens[tokenType][1].append(token)
+            tokens[tokenType][0] = concatenateCharacterTokens(tokens[tokenType][0])
         return tokens["expected"] == tokens["received"]
 
 
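For context, concatenateCharacterTokens is a helper defined elsewhere in this file: it merges runs of adjacent Character tokens so that differently chunked but equivalent streams compare equal. A minimal sketch of the behavior the two hunks above rely on (not the verbatim html5lib implementation):

    def concatenateCharacterTokens(tokens):
        # Merge adjacent ["Character", data] tokens into one, e.g.
        # [["Character", "a"], ["Character", "b"]] -> [["Character", "ab"]].
        output = []
        for token in tokens:
            isChar = token != "ParseError" and token[0] == "Character"
            if (isChar and output and output[-1] != "ParseError"
                    and output[-1][0] == "Character"):
                output[-1] = ["Character", output[-1][1] + token[1]]
            else:
                output.append(token)
        return output

In the direct-comparison path the token list can contain "ParseError" strings interleaved with token lists, so a ParseError ends a run rather than being merged into it; in the error-sorting branch the ParseErrors have already been split out before the helper runs.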
@@ -174,7 +176,7 @@ def runTokenizerTest(test):
     warnings.resetwarnings()
     warnings.simplefilter("error")
 
-    expected = concatenateCharacterTokens(test['output'])
+    expected = test['output']
     if 'lastStartTag' not in test:
         test['lastStartTag'] = None
     parser = TokenizerTestParser(test['initialState'],
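This hunk is the other half of the move: runTokenizerTest now passes test['output'] through untouched, and normalization happens inside tokensMatch, so both the direct comparison and the error-order-insensitive comparison see concatenated streams. A quick illustration of the equality this enables (the token streams are hypothetical; the positional flags correspond to ignoreErrorOrder and ignoreErrors):

    # Chunking of the expected Character tokens no longer matters.
    expected = [["Character", "fo"], ["Character", "o"]]
    received = [["Character", "foo"]]
    assert tokensMatch(expected, received, False, False)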