Skip to content

Copy old mappings to _meta section #83041

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 7 commits into from
Jan 27, 2022
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1899,8 +1899,19 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE
}
builder.settings(settings);
} else if ("mappings".equals(currentFieldName)) {
// don't try to parse these for now
parser.skipChildren();
MapBuilder<String, Object> mappingSourceBuilder = MapBuilder.<String, Object>newMapBuilder();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
mappingSourceBuilder.put(mappingType, parser.mapOrdered());
} else {
throw new IllegalArgumentException("Unexpected token: " + token);
}
}
Map<String, Object> mapping = mappingSourceBuilder.map();
handleLegacyMapping(builder, mapping);
} else if ("in_sync_allocations".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
Expand All @@ -1924,8 +1935,18 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
// don't try to parse these for now
parser.skipChildren();
MapBuilder<String, Object> mappingSourceBuilder = MapBuilder.<String, Object>newMapBuilder();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Map<String, Object> mapping;
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
CompressedXContent compressedXContent = new CompressedXContent(parser.binaryValue());
mapping = XContentHelper.convertToMap(compressedXContent.compressedReference(), true).v2();
} else {
mapping = parser.mapOrdered();
}
mappingSourceBuilder.putAll(mapping);
}
handleLegacyMapping(builder, mappingSourceBuilder.map());
} else {
parser.skipChildren();
}
Expand All @@ -1949,12 +1970,23 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE
}
XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);

builder.putMapping(MappingMetadata.EMPTY_MAPPINGS); // just make sure it's not empty so that _source can be read
if (builder.mapping() == null) {
builder.putMapping(MappingMetadata.EMPTY_MAPPINGS); // just make sure it's not empty so that _source can be read
}

IndexMetadata indexMetadata = builder.build();
assert indexMetadata.getCreationVersion().before(Version.CURRENT.minimumIndexCompatibilityVersion());
return indexMetadata;
}

/**
 * Installs a parsed legacy (pre-7.x) mapping source on the index metadata builder.
 * An empty mapping source is ignored; a single-type mapping keeps its original
 * type name, while a multi-type mapping is collapsed under the single mapping
 * type name used by current versions.
 */
private static void handleLegacyMapping(Builder builder, Map<String, Object> mapping) {
    // Nothing to install for an empty mapping source.
    if (mapping.isEmpty()) {
        return;
    }
    String type = mapping.size() == 1
        ? mapping.keySet().iterator().next()
        : MapperService.SINGLE_MAPPING_NAME;
    builder.putMapping(new MappingMetadata(type, mapping));
}
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,16 @@ public Map<String, Object> getSourceAsMap() throws ElasticsearchParseException {
return sourceAsMap();
}

/**
 * Converts the serialized compressed form of the mappings into a parsed map.
 * In contrast to {@link #sourceAsMap()}, this does not remove the type,
 * i.e. the top-level key of the mapping source is returned as-is.
 *
 * @return the mapping source as a map, including the type key
 * @throws ElasticsearchParseException if the compressed mapping source cannot be parsed
 */
public Map<String, Object> rawSourceAsMap() throws ElasticsearchParseException {
    // convertToMap already yields a typed Map<String, Object>, so neither an
    // unchecked cast nor a @SuppressWarnings annotation is needed here.
    return XContentHelper.convertToMap(source.compressedReference(), true).v2();
}

// Returns whether routing is required for operations on this index's mapping.
public boolean routingRequired() {
    return this.routingRequired;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexMetadataVerifier;
import org.elasticsearch.cluster.metadata.IndexTemplateMetadata;
import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService;
Expand Down Expand Up @@ -78,6 +79,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
Expand Down Expand Up @@ -1294,6 +1296,11 @@ public ClusterState execute(ClusterState currentState) {
request.indexSettings(),
request.ignoreIndexSettings()
);
if (snapshotIndexMetadata.getCreationVersion()
.before(currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion())) {
// adapt index metadata so that it can be understood by current version
snapshotIndexMetadata = convertLegacyIndex(snapshotIndexMetadata);
}
try {
snapshotIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(snapshotIndexMetadata, minIndexCompatibilityVersion);
} catch (Exception ex) {
Expand Down Expand Up @@ -1582,6 +1589,32 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState)
}
}

/**
 * Rewrites the metadata of a legacy (no longer index-compatible) snapshot index
 * so that it can be restored into the current version: the original mapping is
 * preserved under {@code _meta/legacy-mappings} of a fresh, otherwise empty
 * mapping that keeps the original type name.
 */
private IndexMetadata convertLegacyIndex(IndexMetadata snapshotIndexMetadata) {
    MappingMetadata legacyMetadata = snapshotIndexMetadata.mapping();
    // Parse eagerly (as before) so a broken mapping source fails fast in both branches.
    Map<String, Object> rawMappingSource = legacyMetadata.rawSourceAsMap();

    // Stash the old mapping under _meta/legacy-mappings so it stays inspectable.
    Map<String, Object> metaSection = new LinkedHashMap<>();
    if (snapshotIndexMetadata.getSettings().getAsBoolean("index.source_only", false)) {
        // For source-only snapshots the actual mapping sits under "_meta";
        // sourceAsMap() strips the type wrapper before we look it up.
        Object embeddedMeta = legacyMetadata.sourceAsMap().get("_meta");
        if (embeddedMeta instanceof Map<?, ?> embeddedMetaMap) {
            metaSection.put("legacy-mappings", embeddedMetaMap);
        }
    } else {
        metaSection.put("legacy-mappings", rawMappingSource);
    }

    Map<String, Object> convertedSource = new LinkedHashMap<>();
    convertedSource.put("_meta", metaSection);

    Map<String, Object> converted = new LinkedHashMap<>();
    converted.put(legacyMetadata.type(), convertedSource);
    // TODO: _routing? Perhaps we don't need to obey any routing here as stuff is read-only anyway and get API will be disabled
    return IndexMetadata.builder(snapshotIndexMetadata)
        .putMapping(new MappingMetadata(legacyMetadata.type(), converted))
        .build();
}

private static IndexMetadata.Builder restoreToCreateNewIndex(IndexMetadata snapshotIndexMetadata, String renamedIndexName) {
return IndexMetadata.builder(snapshotIndexMetadata)
.state(IndexMetadata.State.OPEN)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,14 @@
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CloseIndexRequest;
import org.elasticsearch.client.indices.GetMappingsRequest;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.client.searchable_snapshots.MountSnapshotRequest;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.routing.Murmur3HashFunction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
Expand Down Expand Up @@ -59,8 +62,13 @@
import java.util.Set;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.startsWith;

public class OldRepositoryAccessIT extends ESRestTestCase {
@Override
Expand Down Expand Up @@ -127,7 +135,9 @@ && randomBoolean()) {
for (int i = 0; i < numDocs + extraDocs; i++) {
String id = "testdoc" + i;
expectedIds.add(id);
Request doc = new Request("PUT", "/test/doc/" + id);
// use multiple types for ES versions < 6.0.0
String type = "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? Murmur3HashFunction.hash(id) % 2 : 0);
Request doc = new Request("PUT", "/test/" + type + "/" + id);
doc.addParameter("refresh", "true");
doc.setJsonEntity(sourceForDoc(i));
assertOK(oldEs.performRequest(doc));
Expand All @@ -136,7 +146,8 @@ && randomBoolean()) {
for (int i = 0; i < extraDocs; i++) {
String id = randomFrom(expectedIds);
expectedIds.remove(id);
Request doc = new Request("DELETE", "/test/doc/" + id);
String type = "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? Murmur3HashFunction.hash(id) % 2 : 0);
Request doc = new Request("DELETE", "/test/" + type + "/" + id);
doc.addParameter("refresh", "true");
oldEs.performRequest(doc);
}
Expand Down Expand Up @@ -218,7 +229,7 @@ && randomBoolean()) {

if (Build.CURRENT.isSnapshot()) {
// restore / mount and check whether searches work
restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository);
restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository, oldVersion);

// close indices
assertTrue(
Expand All @@ -236,7 +247,7 @@ && randomBoolean()) {
);

// restore / mount again
restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository);
restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository, oldVersion);
}
} finally {
IOUtils.closeWhileHandlingException(
Expand Down Expand Up @@ -266,7 +277,8 @@ private void restoreMountAndVerify(
Set<String> expectedIds,
RestHighLevelClient client,
int numberOfShards,
boolean sourceOnlyRepository
boolean sourceOnlyRepository,
Version oldVersion
) throws IOException {
// restore index
RestoreSnapshotResponse restoreSnapshotResponse = client.snapshot()
Expand All @@ -291,6 +303,39 @@ private void restoreMountAndVerify(
.getStatus()
);

MappingMetadata mapping = client.indices()
.getMapping(new GetMappingsRequest().indices("restored_test"), RequestOptions.DEFAULT)
.mappings()
.get("restored_test");
logger.info("mapping for {}: {}", mapping.type(), mapping.source().string());
Map<String, Object> root = mapping.sourceAsMap();
assertThat(root, hasKey("_meta"));
assertThat(root.get("_meta"), instanceOf(Map.class));
@SuppressWarnings("unchecked")
Map<String, Object> meta = (Map<String, Object>) root.get("_meta");
assertThat(meta, hasKey("legacy-mappings"));
assertThat(meta.get("legacy-mappings"), instanceOf(Map.class));
@SuppressWarnings("unchecked")
Map<String, Object> legacyMappings = (Map<String, Object>) meta.get("legacy-mappings");
assertThat(legacyMappings.keySet(), not(empty()));
for (Map.Entry<String, Object> entry : legacyMappings.entrySet()) {
String type = entry.getKey();
assertThat(type, startsWith("doc"));
assertThat(entry.getValue(), instanceOf(Map.class));
@SuppressWarnings("unchecked")
Map<String, Object> legacyMapping = (Map<String, Object>) entry.getValue();
assertThat(legacyMapping, hasKey("properties"));
assertThat(legacyMapping.get("properties"), instanceOf(Map.class));
@SuppressWarnings("unchecked")
Map<String, Object> propertiesMapping = (Map<String, Object>) legacyMapping.get("properties");
assertThat(propertiesMapping, hasKey("val"));
assertThat(propertiesMapping.get("val"), instanceOf(Map.class));
@SuppressWarnings("unchecked")
Map<String, Object> valMapping = (Map<String, Object>) propertiesMapping.get("val");
assertThat(valMapping, hasKey("type"));
assertEquals("long", valMapping.get("type"));
}

// run a search against the index
assertDocs("restored_test", numDocs, expectedIds, client, sourceOnlyRepository);

Expand Down