Diffstat (limited to 'src/google/protobuf/io/tokenizer_unittest.cc')
-rw-r--r--  src/google/protobuf/io/tokenizer_unittest.cc  24
1 file changed, 11 insertions, 13 deletions
diff --git a/src/google/protobuf/io/tokenizer_unittest.cc b/src/google/protobuf/io/tokenizer_unittest.cc
index dbb5be4f..b39279b7 100644
--- a/src/google/protobuf/io/tokenizer_unittest.cc
+++ b/src/google/protobuf/io/tokenizer_unittest.cc
@@ -411,12 +411,6 @@ MultiTokenCase kMultiTokenCases[] = {
{ Tokenizer::TYPE_END , "" , 1, 3, 3 },
}},
- // Bytes with the high-order bit set should not be seen as control characters.
- { "\300", {
- { Tokenizer::TYPE_SYMBOL, "\300", 0, 0, 1 },
- { Tokenizer::TYPE_END , "" , 0, 1, 1 },
- }},
-
// Test all whitespace chars
{ "foo\n\t\r\v\fbar", {
{ Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3 },
@@ -741,7 +735,7 @@ TEST_F(TokenizerTest, ParseInteger) {
EXPECT_EQ(0, ParseInteger("0x"));
uint64 i;
-#ifdef PROTOBUF_HASDEATH_TEST // death tests do not work on Windows yet
+#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
// Test invalid integers that will never be tokenized as integers.
EXPECT_DEBUG_DEATH(Tokenizer::ParseInteger("zxy", kuint64max, &i),
"passed text that could not have been tokenized as an integer");
@@ -753,7 +747,7 @@ TEST_F(TokenizerTest, ParseInteger) {
"passed text that could not have been tokenized as an integer");
EXPECT_DEBUG_DEATH(Tokenizer::ParseInteger("-1", kuint64max, &i),
"passed text that could not have been tokenized as an integer");
-#endif // PROTOBUF_HASDEATH_TEST
+#endif // PROTOBUF_HAS_DEATH_TEST
// Test overflows.
EXPECT_TRUE (Tokenizer::ParseInteger("0", 0, &i));
@@ -796,7 +790,7 @@ TEST_F(TokenizerTest, ParseFloat) {
EXPECT_EQ( 0.0, Tokenizer::ParseFloat("1e-9999999999999999999999999999"));
EXPECT_EQ(HUGE_VAL, Tokenizer::ParseFloat("1e+9999999999999999999999999999"));
-#ifdef PROTOBUF_HASDEATH_TEST // death tests do not work on Windows yet
+#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
// Test invalid integers that will never be tokenized as integers.
EXPECT_DEBUG_DEATH(Tokenizer::ParseFloat("zxy"),
"passed text that could not have been tokenized as a float");
@@ -804,7 +798,7 @@ TEST_F(TokenizerTest, ParseFloat) {
"passed text that could not have been tokenized as a float");
EXPECT_DEBUG_DEATH(Tokenizer::ParseFloat("-1.0"),
"passed text that could not have been tokenized as a float");
-#endif // PROTOBUF_HASDEATH_TEST
+#endif // PROTOBUF_HAS_DEATH_TEST
}
TEST_F(TokenizerTest, ParseString) {
@@ -843,10 +837,10 @@ TEST_F(TokenizerTest, ParseString) {
EXPECT_EQ("u0", output);
// Test invalid strings that will never be tokenized as strings.
-#ifdef PROTOBUF_HASDEATH_TEST // death tests do not work on Windows yet
+#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
EXPECT_DEBUG_DEATH(Tokenizer::ParseString("", &output),
"passed text that could not have been tokenized as a string");
-#endif // PROTOBUF_HASDEATH_TEST
+#endif // PROTOBUF_HAS_DEATH_TEST
}
TEST_F(TokenizerTest, ParseStringAppend) {
@@ -883,7 +877,7 @@ ErrorCase kErrorCases[] = {
{ "'\\x' foo", true,
"0:3: Expected hex digits for escape sequence.\n" },
{ "'foo", false,
- "0:4: String literals cannot cross line boundaries.\n" },
+ "0:4: Unexpected end of string.\n" },
{ "'bar\nfoo", true,
"0:4: String literals cannot cross line boundaries.\n" },
{ "'\\u01' foo", true,
@@ -951,6 +945,10 @@ ErrorCase kErrorCases[] = {
"0:0: Invalid control characters encountered in text.\n" },
{ string("\0\0foo", 5), true,
"0:0: Invalid control characters encountered in text.\n" },
+
+ // Check error from high order bits set
+ { "\300foo", true,
+ "0:0: Interpreting non ascii codepoint 192.\n" },
};
TEST_2D(TokenizerTest, Errors, kErrorCases, kBlockSizes) {
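
A minimal sketch, not part of the patch above, illustrating the behavior this change tests: after the patch, a byte with the high-order bit set (here "\300") is reported through the ErrorCollector as a non-ASCII codepoint instead of being returned as a TYPE_SYMBOL token. PrintingErrorCollector and the surrounding scaffolding are hypothetical; only Tokenizer, ErrorCollector, and ArrayInputStream come from the protobuf headers of this era.

    // Sketch only: exercises the tokenizer on input containing "\300".
    #include <iostream>
    #include <string>
    #include <google/protobuf/io/tokenizer.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>

    namespace io = google::protobuf::io;

    // Hypothetical collector that just prints errors, similar in spirit to
    // the TestErrorCollector used inside tokenizer_unittest.cc.
    class PrintingErrorCollector : public io::ErrorCollector {
     public:
      // Called by the tokenizer with zero-based line/column numbers.
      virtual void AddError(int line, int column, const std::string& message) {
        std::cerr << line << ":" << column << ": " << message << std::endl;
      }
    };

    int main() {
      const char text[] = "\300foo";
      io::ArrayInputStream input(text, sizeof(text) - 1);
      PrintingErrorCollector errors;
      io::Tokenizer tokenizer(&input, &errors);

      // With this patch applied, the loop should report something like
      //   0:0: Interpreting non ascii codepoint 192.
      // (matching the new kErrorCases entry) and should still recover and
      // yield the identifier token "foo" afterwards.
      while (tokenizer.Next()) {
        std::cout << tokenizer.current().text << std::endl;
      }
      return 0;
    }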