Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding support for commas in field names #1896

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 44 additions & 25 deletions mr/src/main/java/org/elasticsearch/hadoop/util/StringUtils.java
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;
import java.util.stream.Collectors;


/**
Expand Down Expand Up @@ -126,20 +127,44 @@ public static List<String> tokenize(String string, String delimiters, boolean tr
if (!StringUtils.hasText(string)) {
return Collections.emptyList();
}
StringTokenizer st = new StringTokenizer(string, delimiters);
List<String> tokens = new ArrayList<String>();
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (trimTokens) {
token = token.trim();
List<String> tokens = new ArrayList<>();
char[] delims = delimiters.toCharArray();
StringBuilder currentToken = new StringBuilder();
boolean inQuotedToken = false;
for (char character : string.toCharArray()) {
if (character == '\"') {
inQuotedToken = !inQuotedToken;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I still think just going with encoding/decoding the fields where needed would be simpler. For instance, this logic now breaks if we include quotation marks in the field names. Granted, that's probably even more unlikely to happen than commas, but if we're talking about respecting the tokenize method contracts, it's still broken.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll have to come back to this when I have a little time to get into it. I don't remember exactly what I meant by

it does something that I don't think most people would expect of methods called tokenize and concatenate

But I remember it being bad enough that it seemed like a deal-killer for encoding/decoding the fields.

}
if (!ignoreEmptyTokens || token.length() > 0) {
tokens.add(token);
else if (inQuotedToken == false && isCharacterInArray(character, delims)) {
addTokenToList(tokens, currentToken, trimTokens, ignoreEmptyTokens);
currentToken = new StringBuilder();
} else {
currentToken.append(character);
}
}
addTokenToList(tokens, currentToken, trimTokens, ignoreEmptyTokens);
return tokens;
}

private static void addTokenToList(List<String> tokens, StringBuilder newToken, boolean trimTokens, boolean ignoreEmptyTokens) {
// Materialize the pending token, optionally stripping surrounding whitespace.
final String candidate = trimTokens ? newToken.toString().trim() : newToken.toString();
// Empty tokens are dropped only when the caller asked for that.
if (candidate.length() > 0 || !ignoreEmptyTokens) {
tokens.add(candidate);
}
}

private static boolean isCharacterInArray(char character, char[] charArray) {
// Linear membership scan, delegated to String#indexOf over a String view
// of the array — equivalent to the hand-rolled loop.
return String.valueOf(charArray).indexOf(character) != -1;
}

/**
 * Joins the elements of the given collection into a single string using the
 * default delimiter. Delegates to {@link #concatenate(Collection, String)}.
 */
public static String concatenate(Collection<?> list) {
return concatenate(list, DEFAULT_DELIMITER);
}
Expand All @@ -151,15 +176,10 @@ public static String concatenate(Collection<?> list, String delimiter) {
if (delimiter == null) {
delimiter = EMPTY;
}
StringBuilder sb = new StringBuilder();

for (Object object : list) {
sb.append(object.toString());
sb.append(delimiter);
}

sb.setLength(sb.length() - delimiter.length());
return sb.toString();
final String finalDelimiter = delimiter;
return list.stream().map(item -> item.toString())
.map(token -> optionallyWrapToken(token, finalDelimiter))
.collect(Collectors.joining(delimiter));
}

public static String concatenate(Object[] array, String delimiter) {
Expand All @@ -169,15 +189,14 @@ public static String concatenate(Object[] array, String delimiter) {
if (delimiter == null) {
delimiter = EMPTY;
}
final String finalDelimiter = delimiter;
return Arrays.stream(array).map(item -> item.toString())
.map(token -> optionallyWrapToken(token, finalDelimiter))
.collect(Collectors.joining(delimiter));
}

StringBuilder sb = new StringBuilder();
for (int i = 0; i < array.length; i++) {
if (i > 0) {
sb.append(delimiter);
}
sb.append(array[i]);
}
return sb.toString();
/**
 * Wraps the token in double quotes when it contains the delimiter, so that
 * a later {@code tokenize} call does not split it apart; otherwise returns
 * the token unchanged.
 */
private static String optionallyWrapToken(String token, String delimiter) {
// Every string "contains" the empty string, so an empty delimiter (the
// fallback used when callers pass null) would spuriously quote every
// token — return it untouched in that case.
if (delimiter.isEmpty()) {
return token;
}
return token.contains(delimiter) ? "\"" + token + "\"" : token;
}

public static String deleteWhitespace(CharSequence sequence) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@

import org.junit.Test;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.junit.Assert.*;

public class StringUtilsTest {
Expand Down Expand Up @@ -72,4 +76,32 @@ public void testSingularIndexNames() {
assertFalse(StringUtils.isValidSingularIndexName("abc{date|yyyy-MM-dd}defg"));

}

@Test
public void testTokenize() {
// concatenate/tokenize must round-trip plain tokens
List<String> plain = Arrays.asList("this", "is a", "test");
assertEquals(plain, StringUtils.tokenize(StringUtils.concatenate(plain), ",", true, true));

// with trimming and empty-token filtering disabled, whitespace survives intact
List<String> padded = Arrays.asList("this", " is a", " test ", " ");
assertEquals(padded, StringUtils.tokenize(StringUtils.concatenate(padded), ",", false, false));

// a token containing the delimiter is quoted on concatenate and re-joined on tokenize
List<String> withComma = Arrays.asList("this", "is, a", "test");
assertEquals(withComma, StringUtils.tokenize(StringUtils.concatenate(withComma), ",", true, true));

// the same protection applies with an explicit, non-default delimiter
Object[] asArray = new String[]{"this", "is, a", "test"};
assertEquals(Arrays.asList(asArray), StringUtils.tokenize(StringUtils.concatenate(asArray, ";"), ";", true, true));

// a multi-character delimiter set still honors the quoting of the embedded comma
List<String> multiDelims = Arrays.asList("this", "is, a", "test");
assertEquals(multiDelims, StringUtils.tokenize(StringUtils.concatenate(multiDelims, ","), ";,", true, true));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -2223,7 +2223,30 @@ class AbstractScalaEsScalaSparkSQL(prefix: String, readMetadata: jl.Boolean, pus
assertEquals(nested(0).getLong(1), 6)
}


@Test
// Regression test: a field whose name embeds a comma must survive the
// connector's comma-delimited field-list handling end-to-end.
def testCommasInFieldNames(): Unit = {
val index = wrapIndex("commas-in-names-index")
val typed = "data"
val (target, docPath) = makeTargets(index, typed)
// Strict mapping declaring a single keyword field whose name contains a comma.
val mapping = wrapMapping("data", s"""{
| "dynamic": "strict",
| "properties" : {
| "some column with a comma, and then some" : {
| "type" : "keyword"
| }
| }
| }
""".stripMargin)
RestUtils.touch(index)
RestUtils.putMapping(index, typed, mapping.getBytes(StringUtils.UTF_8))
// Index one document under the comma-bearing field name.
RestUtils.postData(docPath, "{\"some column with a comma, and then some\": \"sdfdsf\"}".getBytes("UTF-8"))
RestUtils.refresh(target)
// Reading through Spark SQL must not split the field name on its comma.
val df = sqc.read.format("es").load(index)
df.printSchema()
df.show()
assertEquals(1, df.count())
}

@Test
def testMultiIndexes() {
// add some data
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2284,7 +2284,30 @@ class AbstractScalaEsScalaSparkSQL(prefix: String, readMetadata: jl.Boolean, pus
assertEquals(nested(0).getLong(1), 6)
}


@Test
// Regression test: a field whose name embeds a comma must survive the
// connector's comma-delimited field-list handling end-to-end.
def testCommasInFieldNames(): Unit = {
val index = wrapIndex("commas-in-names-index")
val typed = "data"
val (target, docPath) = makeTargets(index, typed)
// Strict mapping declaring a single keyword field whose name contains a comma.
val mapping = wrapMapping("data", s"""{
| "dynamic": "strict",
| "properties" : {
| "some column with a comma, and then some" : {
| "type" : "keyword"
| }
| }
| }
""".stripMargin)
RestUtils.touch(index)
RestUtils.putMapping(index, typed, mapping.getBytes(StringUtils.UTF_8))
// Index one document under the comma-bearing field name.
RestUtils.postData(docPath, "{\"some column with a comma, and then some\": \"sdfdsf\"}".getBytes("UTF-8"))
RestUtils.refresh(target)
// Reading through Spark SQL must not split the field name on its comma.
val df = sqc.read.format("es").load(index)
df.printSchema()
df.show()
assertEquals(1, df.count())
}

@Test
def testMultiIndexes() {
// add some data
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2284,7 +2284,30 @@ class AbstractScalaEsScalaSparkSQL(prefix: String, readMetadata: jl.Boolean, pus
assertEquals(nested(0).getLong(1), 6)
}


@Test
// Regression test: a field whose name embeds a comma must survive the
// connector's comma-delimited field-list handling end-to-end.
def testCommasInFieldNames(): Unit = {
val index = wrapIndex("commas-in-names-index")
val typed = "data"
val (target, docPath) = makeTargets(index, typed)
// Strict mapping declaring a single keyword field whose name contains a comma.
val mapping = wrapMapping("data", s"""{
| "dynamic": "strict",
| "properties" : {
| "some column with a comma, and then some" : {
| "type" : "keyword"
| }
| }
| }
""".stripMargin)
RestUtils.touch(index)
RestUtils.putMapping(index, typed, mapping.getBytes(StringUtils.UTF_8))
// Index one document under the comma-bearing field name.
RestUtils.postData(docPath, "{\"some column with a comma, and then some\": \"sdfdsf\"}".getBytes("UTF-8"))
RestUtils.refresh(target)
// Reading through Spark SQL must not split the field name on its comma.
val df = sqc.read.format("es").load(index)
df.printSchema()
df.show()
assertEquals(1, df.count())
}

@Test
def testMultiIndexes() {
// add some data
Expand Down