(query, queryEngineResults);
- }
-
- /* MNT-8804 filter ResultSet for nodes with corrupted indexes */
- private ResultSet filterNotExistingNodes(ResultSet resultSet)
- {
- if (resultSet instanceof PagingLuceneResultSet)
- {
- ResultSet wrapped = ((PagingLuceneResultSet)resultSet).getWrapped();
-
- if (wrapped instanceof FilteringResultSet)
- {
- FilteringResultSet filteringResultSet = (FilteringResultSet)wrapped;
-
- for (int i = 0; i < filteringResultSet.length(); i++)
- {
- NodeRef nodeRef = filteringResultSet.getNodeRef(i);
- /* filter node if it does not exist */
- if (!nodeService.exists(nodeRef))
- {
- filteringResultSet.setIncluded(i, false);
- }
- }
- }
- }
-
- return resultSet;
- }
-
- public CMISResultSet query(String query, StoreRef storeRef)
- {
- CMISQueryOptions options = new CMISQueryOptions(query, storeRef);
- return query(options);
- }
-
- public boolean getPwcSearchable()
- {
- return true;
- }
-
- public boolean getAllVersionsSearchable()
- {
- return false;
- }
-
- public CapabilityQuery getQuerySupport()
- {
- return CapabilityQuery.BOTHCOMBINED;
- }
-
- public CapabilityJoin getJoinSupport()
- {
- return CapabilityJoin.NONE;
- }
-}
diff --git a/src/main/java/org/alfresco/repo/blog/BlogServiceImpl.java b/src/main/java/org/alfresco/repo/blog/BlogServiceImpl.java
index da7d6d5a33..0b5939b0da 100644
--- a/src/main/java/org/alfresco/repo/blog/BlogServiceImpl.java
+++ b/src/main/java/org/alfresco/repo/blog/BlogServiceImpl.java
@@ -46,7 +46,7 @@
import org.alfresco.repo.blog.cannedqueries.GetBlogPostsCannedQuery;
import org.alfresco.repo.blog.cannedqueries.GetBlogPostsCannedQueryFactory;
import org.alfresco.repo.content.MimetypeMap;
-import org.alfresco.repo.search.impl.lucene.LuceneUtils;
+import org.alfresco.repo.search.LuceneUtils;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.security.permissions.AccessDeniedException;
import org.alfresco.repo.site.SiteServiceImpl;
diff --git a/src/main/java/org/alfresco/repo/links/LinksServiceImpl.java b/src/main/java/org/alfresco/repo/links/LinksServiceImpl.java
index 93bf691db4..2cc8dedd66 100644
--- a/src/main/java/org/alfresco/repo/links/LinksServiceImpl.java
+++ b/src/main/java/org/alfresco/repo/links/LinksServiceImpl.java
@@ -1,28 +1,28 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
package org.alfresco.repo.links;
import java.io.Serializable;
@@ -42,7 +42,7 @@
import org.alfresco.repo.node.getchildren.GetChildrenAuditableCannedQuery;
import org.alfresco.repo.node.getchildren.GetChildrenAuditableCannedQueryFactory;
import org.alfresco.repo.query.NodeBackedEntity;
-import org.alfresco.repo.search.impl.lucene.LuceneUtils;
+import org.alfresco.repo.search.LuceneUtils;
import org.alfresco.repo.site.SiteServiceImpl;
import org.alfresco.service.cmr.dictionary.DictionaryService;
import org.alfresco.service.cmr.links.LinkInfo;
diff --git a/src/main/java/org/alfresco/repo/search/LuceneUtils.java b/src/main/java/org/alfresco/repo/search/LuceneUtils.java
new file mode 100644
index 0000000000..d3eb2665bb
--- /dev/null
+++ b/src/main/java/org/alfresco/repo/search/LuceneUtils.java
@@ -0,0 +1,143 @@
+/*
+ * #%L
+ * Alfresco Legacy Lucene
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
+package org.alfresco.repo.search;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
+import org.alfresco.service.cmr.dictionary.DictionaryService;
+import org.alfresco.service.cmr.dictionary.PropertyDefinition;
+import org.alfresco.service.namespace.NamespaceService;
+import org.alfresco.service.namespace.QName;
+
+/**
+ * Lucene utils
+ *
+ * @author Andy
+ *
+ */
+public class LuceneUtils
+{
+ /**
+ * This is the date string format as required by Lucene e.g. "1970\\-01\\-01T00:00:00"
+ * @since 4.0
+ */
+ private static final SimpleDateFormat LUCENE_DATETIME_FORMAT = new SimpleDateFormat("yyyy\\-MM\\-dd'T'HH:mm:ss");
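+ // Note: SimpleDateFormat is not thread-safe, so this shared static instance
+ // should not be used to format dates concurrently from multiple threads.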
+
+ /**
+ * Returns a date string in the format required by Lucene.
+ *
+ * @since 4.0
+ */
+ public static String getLuceneDateString(Date date)
+ {
+ return LUCENE_DATETIME_FORMAT.format(date);
+ }
+
+ /**
+ * This method creates a Lucene query fragment which constrains the specified dateProperty to a range
+ * given by the fromDate and toDate parameters.
+ *
+ * @param fromDate the start of the date range (defaults to 1970-01-01 00:00:00 if null).
+ * @param toDate the end of the date range (defaults to 3000-12-31 00:00:00 if null).
+ * @param dateProperty the Alfresco property value to check against the range (must be a valid Date or DateTime property).
+ *
+ * @return the Lucene query fragment.
+ *
+ * @throws NullPointerException if dateProperty is null or if the dateProperty is not recognised by the system.
+ * @throws IllegalArgumentException if dateProperty refers to a property that is not of type {@link DataTypeDefinition#DATE} or {@link DataTypeDefinition#DATETIME}.
+ */
+ public static String createDateRangeQuery(Date fromDate, Date toDate, QName dateProperty,
+ DictionaryService dictionaryService, NamespaceService namespaceService)
+ {
+ // Some sanity checking of the date property.
+ if (dateProperty == null)
+ {
+ throw new NullPointerException("dateProperty cannot be null");
+ }
+ PropertyDefinition propDef = dictionaryService.getProperty(dateProperty);
+ if (propDef == null)
+ {
+ throw new NullPointerException("dateProperty '" + dateProperty + "' not recognised.");
+ }
+ else
+ {
+ final QName propDefType = propDef.getDataType().getName();
+ if ( !DataTypeDefinition.DATE.equals(propDefType) &&
+ !DataTypeDefinition.DATETIME.equals(propDefType))
+ {
+ throw new IllegalArgumentException("Illegal property type '" + dateProperty + "' [" + propDefType + "]");
+ }
+ }
+
+ QName propertyName = propDef.getName();
+ final String shortFormQName = propertyName.toPrefixString(namespaceService);
+ final String prefix = shortFormQName.substring(0, shortFormQName.indexOf(QName.NAMESPACE_PREFIX));
+ final String localName = propertyName.getLocalName();
+
+
+ // I can see potential issues with using 1970 and 3000 as default dates, but this is what the previous
+ // JavaScript controllers/libs did and I'll reproduce it here.
+ final String ZERO_DATE = "1970\\-01\\-01T00:00:00";
+ final String FUTURE_DATE = "3000\\-12\\-31T00:00:00";
+
+ StringBuilder luceneQuery = new StringBuilder();
+ luceneQuery.append(" +@").append(prefix).append("\\:").append(localName).append(":[");
+ if (fromDate != null)
+ {
+ luceneQuery.append(LuceneUtils.getLuceneDateString(fromDate));
+ }
+ else
+ {
+ luceneQuery.append(ZERO_DATE);
+ }
+ luceneQuery.append(" TO ");
+ if (toDate != null)
+ {
+ luceneQuery.append(LuceneUtils.getLuceneDateString(toDate));
+ }
+ else
+ {
+ luceneQuery.append(FUTURE_DATE);
+ }
+ luceneQuery.append("] ");
+ return luceneQuery.toString();
+ }
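+
+ /*
+ * Illustrative sketch (not part of this change): with the DictionaryService
+ * and NamespaceService beans wired in, and assuming the standard cm:created
+ * datetime property (ContentModel.PROP_CREATED), a call such as
+ *
+ * String fragment = LuceneUtils.createDateRangeQuery(
+ * from, to, ContentModel.PROP_CREATED, dictionaryService, namespaceService);
+ *
+ * yields a fragment like " +@cm\:created:[2005\-01\-01T00:00:00 TO
+ * 2016\-12\-31T00:00:00] " that can be appended to a larger Lucene query.
+ */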
+}
diff --git a/src/main/java/org/alfresco/repo/search/QueryParserException.java b/src/main/java/org/alfresco/repo/search/QueryParserException.java
new file mode 100644
index 0000000000..b2151927fe
--- /dev/null
+++ b/src/main/java/org/alfresco/repo/search/QueryParserException.java
@@ -0,0 +1,89 @@
+/*
+ * #%L
+ * Alfresco Legacy Lucene
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
+package org.alfresco.repo.search;
+
+import org.alfresco.error.AlfrescoRuntimeException;
+
+/**
+ * @author Andy
+ *
+ */
+public class QueryParserException extends AlfrescoRuntimeException
+{
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = 4886993838297301968L;
+
+ /**
+ * @param msgId
+ */
+ public QueryParserException(String msgId)
+ {
+ super(msgId);
+ // TODO Auto-generated constructor stub
+ }
+
+ /**
+ * @param msgId
+ * @param msgParams
+ */
+ public QueryParserException(String msgId, Object[] msgParams)
+ {
+ super(msgId, msgParams);
+ // TODO Auto-generated constructor stub
+ }
+
+ /**
+ * @param msgId
+ * @param cause
+ */
+ public QueryParserException(String msgId, Throwable cause)
+ {
+ super(msgId, cause);
+ // TODO Auto-generated constructor stub
+ }
+
+ /**
+ * @param msgId
+ * @param msgParams
+ * @param cause
+ */
+ public QueryParserException(String msgId, Object[] msgParams, Throwable cause)
+ {
+ super(msgId, msgParams, cause);
+ // TODO Auto-generated constructor stub
+ }
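+
+ /*
+ * Illustrative use (hypothetical call site): a query parser can wrap a
+ * low-level parse failure so that callers see a single runtime exception type:
+ *
+ * throw new QueryParserException("Failed to parse query: " + query, cause);
+ */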
+
+}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java b/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java
deleted file mode 100644
index c5530008ee..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Set;
-
-import org.alfresco.repo.search.IndexerException;
-import org.alfresco.repo.search.impl.lucene.analysis.AlfrescoStandardAnalyser;
-import org.alfresco.repo.search.impl.lucene.index.IndexInfo;
-import org.alfresco.repo.search.impl.lucene.index.TransactionStatus;
-import org.alfresco.repo.search.impl.lucene.index.IndexInfo.LockWork;
-import org.alfresco.service.cmr.dictionary.DictionaryService;
-import org.alfresco.service.cmr.repository.StoreRef;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.IndexSearcher;
-
-/**
- * Common support for abstracting the lucene indexer from its configuration and management requirements.
- *
- *
- * This class defines where the indexes are stored. This should be via a configurable Bean property in Spring.
- *
- *
- * The default file structure is
- *
- * "base"/"protocol"/"name"/ for the main index
- * "base"/"protocol"/"name"/deltas/"id" for transactional updates
- * "base"/"protocol"/"name"/undo/"id" undo information
- *
- *
- *
- * The IndexWriter and IndexReader for a given index are toggled (one should be used for delete and the other for write). These are reused/closed/initialised as required.
- *
- *
- * The index deltas are buffered to memory and persisted in the file system as required.
- *
- * @author Andy Hind
- *
- */
-
-public abstract class AbstractLuceneBase
-{
- private static Log s_logger = LogFactory.getLog(AbstractLuceneBase.class);
-
- private IndexInfo indexInfo;
-
- /**
- * The identifier for the store
- */
-
- protected StoreRef store;
-
- /**
- * The identifier for the delta
- */
-
- protected String deltaId;
-
- private LuceneConfig config;
-
- private TransactionStatus status = TransactionStatus.UNKNOWN;
-
- // "lucene-indexes";
-
- /**
- * Initialise the configuration elements of the lucene store indexers and searchers.
- *
- * @param store StoreRef
- * @param deltaId String
- * @throws LuceneIndexException
- */
- protected void initialise(StoreRef store, String deltaId)
- throws LuceneIndexException
- {
- this.store = store;
- this.deltaId = deltaId;
-
- String basePath = getBasePath();
- File baseDir = new File(basePath);
- indexInfo = IndexInfo.getIndexInfo(baseDir, config);
- try
- {
- if (this.deltaId != null)
- {
- if (! getStatus().equals(TransactionStatus.ACTIVE))
- {
- setStatus(TransactionStatus.ACTIVE);
- }
- else
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Delta already set as active " + deltaId);
- }
- }
- }
- }
- catch (IOException e)
- {
- throw new IndexerException("Failed to set delta as active", e);
- }
- }
-
- /**
- * Utility method to find the path to the base index
- *
- * @return - the base path
- */
- private String getBasePath()
- {
- if (config.getIndexRootLocation() == null)
- {
- throw new IndexerException("No configuration for index location");
- }
- String basePath = config.getIndexRootLocation()
- + File.separator + store.getProtocol() + File.separator + store.getIdentifier() + File.separator;
- return basePath;
- }
-
- /**
- * Get a searcher for the main index. TODO: Split out support for the main index. We really only need this if we want to search over the changing index before it is committed.
- *
- * @return - the searcher
- * @throws LuceneIndexException
- */
-
- protected IndexSearcher getSearcher() throws LuceneIndexException
- {
- try
- {
- return new ClosingIndexSearcher(indexInfo.getMainIndexReferenceCountingReadOnlyIndexReader());
- }
- catch (IOException e)
- {
- s_logger.error("Error", e);
- throw new LuceneIndexException("Failed to open IndexSearcher for " + getBasePath(), e);
- }
- }
-
- protected ClosingIndexSearcher getSearcher(LuceneIndexer luceneIndexer) throws LuceneIndexException
- {
- // If we know the delta id we should do better
-
- try
- {
- if (luceneIndexer == null)
- {
- return new ClosingIndexSearcher(indexInfo.getMainIndexReferenceCountingReadOnlyIndexReader());
- }
- else
- {
- // TODO: Create appropriate reader that lies about deletions
- // from the first
- //
- luceneIndexer.flushPending();
- return new ClosingIndexSearcher(indexInfo.getMainIndexReferenceCountingReadOnlyIndexReader(deltaId,
- luceneIndexer.getDeletions(), luceneIndexer.getContainerDeletions(), luceneIndexer
- .getDeleteOnlyNodes()));
- }
-
- }
- catch (IOException e)
- {
- s_logger.error("Error", e);
- throw new LuceneIndexException("Failed to open IndexSearcher for " + getBasePath(), e);
- }
- }
-
- /**
- * Get a reader for the on file portion of the delta
- *
- * @return - the index reader
- * @throws IOException
- * @throws IOException
- */
-
- protected IndexReader getDeltaReader() throws LuceneIndexException, IOException
- {
- return indexInfo.getDeltaIndexReader(deltaId);
- }
-
- /**
- * Close the on file reader for the delta if it is open
- *
- * @throws IOException
- *
- * @throws IOException
- */
-
- protected void closeDeltaReader() throws LuceneIndexException, IOException
- {
- indexInfo.closeDeltaIndexReader(deltaId);
- }
-
- /**
- * Get the on file writer for the delta
- *
- * @return - the writer for the delta
- * @throws IOException
- * @throws IOException
- */
- protected IndexWriter getDeltaWriter() throws LuceneIndexException, IOException
- {
- return indexInfo.getDeltaIndexWriter(deltaId, new LuceneAnalyser(dictionaryService, config.getDefaultMLIndexAnalysisMode()));
- }
-
- /**
- * Close the on disk delta writer
- *
- * @throws IOException
- *
- * @throws IOException
- */
-
- protected void closeDeltaWriter() throws LuceneIndexException, IOException
- {
- indexInfo.closeDeltaIndexWriter(deltaId);
- }
-
- /**
- * Save the in memory delta to the disk, make sure there is nothing held in memory
- *
- * @throws IOException
- *
- * @throws IOException
- */
- protected void saveDelta() throws LuceneIndexException, IOException
- {
- // Only one should exist so we do not need error trapping to execute the
- // other
- closeDeltaReader();
- closeDeltaWriter();
- }
-
- protected void setInfo(long docs, Set<String> deletions, Set<String> containerDeletions, boolean deleteNodesOnly) throws IOException
- {
- indexInfo.setPreparedState(deltaId, deletions, containerDeletions, docs, deleteNodesOnly);
- }
-
- protected void setStatus(TransactionStatus status) throws IOException
- {
- indexInfo.setStatus(deltaId, status, null, null);
- this.status = status;
- }
-
- protected TransactionStatus getStatus()
- {
- return status;
- }
-
-
-
- private DictionaryService dictionaryService;
-
- protected IndexReader getReader() throws LuceneIndexException, IOException
- {
- return indexInfo.getMainIndexReferenceCountingReadOnlyIndexReader();
- }
-
- /**
- * Set the dictionary service
- * @param dictionaryService DictionaryService
- */
- public void setDictionaryService(DictionaryService dictionaryService)
- {
- this.dictionaryService = dictionaryService;
- }
-
- /**
- * Get the dictionary service.
- *
- * @return - the service
- */
- public DictionaryService getDictionaryService()
- {
- return dictionaryService;
- }
-
- /**
- * Set the lucene configuration options
- *
- * @param config LuceneConfig
- */
- public void setLuceneConfig(LuceneConfig config)
- {
- this.config = config;
- }
-
- /**
- * Get the lucene configuration options.
- *
- * @return - the config options object.
- */
- public LuceneConfig getLuceneConfig()
- {
- return config;
- }
-
- /**
- * Get the ID for the delta we are working with.
- *
- * @return - the id
- */
- public String getDeltaId()
- {
- return deltaId;
- }
-
-
- /**
- * Execute actions against a read only index (all write ops will block)
- *
- * @return - the result returned by the action.
- */
- public <R> R doReadOnly(LockWork<R> lockWork)
- {
- return indexInfo.doReadOnly(lockWork);
- }
-
-
- public void deleteIndex()
- {
- indexInfo.delete(deltaId);
- }
-
-
-}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java b/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java
deleted file mode 100644
index 855c3c2c9d..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java
+++ /dev/null
@@ -1,2238 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import javax.transaction.RollbackException;
-import javax.transaction.SystemException;
-import javax.transaction.Transaction;
-import javax.transaction.xa.XAException;
-import javax.transaction.xa.XAResource;
-import javax.transaction.xa.Xid;
-
-import org.alfresco.error.AlfrescoRuntimeException;
-import org.alfresco.repo.node.NodeBulkLoader;
-import org.alfresco.repo.search.IndexerException;
-import org.alfresco.repo.search.MLAnalysisMode;
-import org.alfresco.repo.search.QueryRegisterComponent;
-import org.alfresco.repo.search.SearcherException;
-import org.alfresco.repo.search.impl.lucene.index.IndexInfo;
-import org.alfresco.repo.search.transaction.SimpleTransaction;
-import org.alfresco.repo.search.transaction.SimpleTransactionManager;
-import org.alfresco.repo.tenant.TenantService;
-import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
-import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
-import org.alfresco.service.cmr.repository.NodeService;
-import org.alfresco.service.cmr.repository.StoreRef;
-import org.alfresco.service.cmr.search.SearchService;
-import org.alfresco.service.transaction.TransactionService;
-import org.alfresco.util.GUID;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.store.Lock;
-import org.quartz.Job;
-import org.quartz.JobDataMap;
-import org.quartz.JobExecutionContext;
-import org.quartz.JobExecutionException;
-import org.springframework.beans.BeansException;
-import org.springframework.beans.factory.DisposableBean;
-import org.springframework.context.ApplicationContext;
-import org.springframework.context.ApplicationContextAware;
-import org.springframework.context.ConfigurableApplicationContext;
-import org.springframework.transaction.support.TransactionSynchronizationManager;
-
-/**
- * This class is the resource manager for LuceneIndexers and LuceneSearchers. It supports two-phase commit inside XA
- * transactions; outside transactions it provides thread-local transaction support. TODO: Provide pluggable support
- * for a transaction manager. TODO: Integrate with Spring transactions.
- *
- * @author andyh
- */
-
-public abstract class AbstractLuceneIndexerAndSearcherFactory extends AbstractIndexerAndSearcher implements LuceneIndexerAndSearcher, XAResource, ApplicationContextAware, DisposableBean
-{
- private static Log logger = LogFactory.getLog(AbstractLuceneIndexerAndSearcherFactory.class);
-
- private int queryMaxClauses;
-
- private int indexerBatchSize;
-
- /**
- * A map of active global transactions. It contains all the indexers a transaction has used, with at most one
- * indexer for each store within a transaction
- */
-
- private Map<Xid, Map<StoreRef, LuceneIndexer>> activeIndexersInGlobalTx = new HashMap<Xid, Map<StoreRef, LuceneIndexer>>();
-
- /**
- * Suspended global transactions.
- */
- private Map<Xid, Map<StoreRef, LuceneIndexer>> suspendedIndexersInGlobalTx = new HashMap<Xid, Map<StoreRef, LuceneIndexer>>();
-
- /**
- * The key under which this instance's map of indexers is stored in a (non-global) transaction
- */
- private final String indexersKey = "AbstractLuceneIndexerAndSearcherFactory." + GUID.generate();
-
- /**
- * The default timeout for transactions TODO: Respect this
- */
-
- private int timeout = DEFAULT_TIMEOUT;
-
- /**
- * Default time out value set to 10 minutes.
- */
- private static final int DEFAULT_TIMEOUT = 600000;
-
- protected TenantService tenantService;
-
- private String indexRootLocation;
-
- private QueryRegisterComponent queryRegister;
-
- /** the maximum transformation time to allow atomically, defaulting to 20ms */
- private long maxAtomicTransformationTime = 20;
-
- private int indexerMaxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
-
- private long writeLockTimeout;
-
- private long commitLockTimeout;
-
- private String lockDirectory;
-
- private MLAnalysisMode defaultMLIndexAnalysisMode = MLAnalysisMode.EXACT_LANGUAGE_AND_ALL;
-
- private MLAnalysisMode defaultMLSearchAnalysisMode = MLAnalysisMode.EXACT_LANGUAGE_AND_ALL;
-
- private ThreadPoolExecutor threadPoolExecutor;
-
- private NodeBulkLoader bulkLoader;
-
- private int maxDocIdCacheSize = 10000;
-
- private int maxDocsForInMemoryMerge = 10000;
-
- private int maxDocsForInMemoryIndex = 10000;
-
- private double maxRamInMbForInMemoryMerge = 16.0;
-
- private double maxRamInMbForInMemoryIndex = 16.0;
-
- private int maxDocumentCacheSize = 100;
-
- private int maxIsCategoryCacheSize = -1;
-
- private int maxLinkAspectCacheSize = 10000;
-
- private int maxParentCacheSize = 10000;
-
- private int maxPathCacheSize = 10000;
-
- private int maxTypeCacheSize = 10000;
-
- private int mergerMaxMergeDocs = 1000000;
-
- private int mergerMergeFactor = 5;
-
-
- private int mergerMaxBufferedDocs = IndexWriter.DISABLE_AUTO_FLUSH;
-
- private double mergerRamBufferSizeMb = 16.0;
-
- private int mergerTargetIndexCount = 5;
-
- private int mergerTargetOverlayCount = 5;
-
- private int mergerTargetOverlaysBlockingFactor = 1;
-
- private boolean fairLocking;
-
- private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
-
- private boolean useNioMemoryMapping = true;
-
- private int writerMaxMergeDocs = 1000000;
-
- private int writerMergeFactor = 5;
-
- private int writerMaxBufferedDocs = IndexWriter.DISABLE_AUTO_FLUSH;
-
- private double writerRamBufferSizeMb = 16.0;
-
- private boolean cacheEnabled = true;
-
- private boolean postSortDateTime;
-
- private ConfigurableApplicationContext applicationContext;
-
- private boolean contentIndexingEnabled = true;
-
- private boolean useInMemorySort = true;
-
- private int maxRawResultSetSizeForInMemorySort = 1000;
-
- private volatile boolean destroyed = false;
-
- /**
- * Constructor for the singleton. TODO: Fit in with IoC.
- */
-
- public AbstractLuceneIndexerAndSearcherFactory()
- {
- super();
- }
-
- /*
- * (non-Javadoc)
- * @see org.springframework.context.ApplicationContextAware#setApplicationContext(org.springframework.context.
- * ApplicationContext)
- */
- public void setApplicationContext(ApplicationContext applicationContext) throws BeansException
- {
- this.applicationContext = (ConfigurableApplicationContext) applicationContext;
- }
-
-
- /*
- * (non-Javadoc)
- *
- * @see org.alfresco.repo.search.impl.lucene.LuceneConfig#getApplicationContext()
- */
- public ConfigurableApplicationContext getApplicationContext()
- {
- return this.applicationContext;
- }
-
- /**
- * Set the directory that contains the indexes
- *
- * @param indexRootLocation String
- */
-
- public void setIndexRootLocation(String indexRootLocation)
- {
- this.indexRootLocation = indexRootLocation;
- }
-
- /**
- * Set the tenant service
- *
- * @param tenantService TenantService
- */
- public void setTenantService(TenantService tenantService)
- {
- this.tenantService = tenantService;
- }
-
- /**
- * Set the query register
- *
- * @param queryRegister QueryRegisterComponent
- */
- public void setQueryRegister(QueryRegisterComponent queryRegister)
- {
- this.queryRegister = queryRegister;
- }
-
- /**
- * Get the query register.
- *
- * @return - the query register.
- */
- public QueryRegisterComponent getQueryRegister()
- {
- return queryRegister;
- }
-
- /**
- * Set the maximum average transformation time allowed to a transformer in order to have the transformation
- * performed in the current transaction. The default is 20ms.
- *
- * @param maxAtomicTransformationTime
- * the maximum average time that a text transformation may take in order to be performed atomically.
- */
- @Override
- public void setMaxAtomicTransformationTime(long maxAtomicTransformationTime)
- {
- this.maxAtomicTransformationTime = maxAtomicTransformationTime;
- }
-
- /**
- * Get the max time for an atomic transform
- *
- * @return - milliseconds as a long
- */
- @Override
- public long getMaxTransformationTime()
- {
- return maxAtomicTransformationTime;
- }
-
- public NodeBulkLoader getBulkLoader()
- {
- return bulkLoader;
- }
-
- public void setBulkLoader(NodeBulkLoader bulkLoader)
- {
- this.bulkLoader = bulkLoader;
- }
-
- /**
- * Check if we are in a global transaction according to the transaction manager
- *
- * @return - true if in a global transaction
- */
-
- private boolean inGlobalTransaction()
- {
- try
- {
- return SimpleTransactionManager.getInstance().getTransaction() != null;
- }
- catch (SystemException e)
- {
- return false;
- }
- }
-
- /**
- * Get the local transaction - may be null if we are outside a transaction.
- *
- * @return - the transaction
- * @throws IndexerException
- */
- private SimpleTransaction getTransaction() throws IndexerException
- {
- try
- {
- return SimpleTransactionManager.getInstance().getTransaction();
- }
- catch (SystemException e)
- {
- throw new IndexerException("Failed to get transaction", e);
- }
- }
-
- /**
- * Get an indexer for the store to use in the current transaction for this thread of control.
- *
- * @param storeRef -
- * the id of the store
- */
- public LuceneIndexer getIndexer(StoreRef storeRef) throws IndexerException
- {
- storeRef = tenantService.getName(storeRef);
-
- // register to receive txn callbacks
- // TODO: make this conditional on whether the XA stuff is being used
- // directly or not
- AlfrescoTransactionSupport.bindLucene(this);
-
- if (inGlobalTransaction())
- {
- SimpleTransaction tx = getTransaction();
- // Only find indexers in the active list
- Map<StoreRef, LuceneIndexer> indexers = activeIndexersInGlobalTx.get(tx);
- if (indexers == null)
- {
- if (suspendedIndexersInGlobalTx.containsKey(tx))
- {
- throw new IndexerException("Trying to obtain an index for a suspended transaction.");
- }
- indexers = new HashMap<StoreRef, LuceneIndexer>();
- activeIndexersInGlobalTx.put(tx, indexers);
- try
- {
- tx.enlistResource(this);
- }
- // TODO: what to do in each case?
- catch (IllegalStateException e)
- {
- throw new IndexerException("", e);
- }
- catch (RollbackException e)
- {
- throw new IndexerException("", e);
- }
- catch (SystemException e)
- {
- throw new IndexerException("", e);
- }
- }
- LuceneIndexer indexer = indexers.get(storeRef);
- if (indexer == null)
- {
- indexer = createIndexer(storeRef, getTransactionId(tx, storeRef));
- indexers.put(storeRef, indexer);
- }
- return indexer;
- }
- else
- // A thread local transaction
- {
- return getThreadLocalIndexer(storeRef);
- }
-
- }
-
- @SuppressWarnings("unchecked")
- private LuceneIndexer getThreadLocalIndexer(StoreRef storeRef)
- {
- Map<StoreRef, LuceneIndexer> indexers = (Map<StoreRef, LuceneIndexer>) AlfrescoTransactionSupport.getResource(indexersKey);
- if (indexers == null)
- {
- indexers = new HashMap<StoreRef, LuceneIndexer>();
- AlfrescoTransactionSupport.bindResource(indexersKey, indexers);
- }
- LuceneIndexer indexer = indexers.get(storeRef);
- if (indexer == null)
- {
- indexer = createIndexer(storeRef, GUID.generate());
- indexers.put(storeRef, indexer);
- }
- return indexer;
- }
-
- /**
- * Get the transaction identifier used to store it in the transaction map.
- *
- * @param tx Transaction
- * @param storeRef StoreRef
- * @return - the transaction id
- */
- @SuppressWarnings("unchecked")
- private String getTransactionId(Transaction tx, StoreRef storeRef)
- {
- if (tx instanceof SimpleTransaction)
- {
- SimpleTransaction simpleTx = (SimpleTransaction) tx;
- return simpleTx.getGUID();
- }
- else if (TransactionSynchronizationManager.isSynchronizationActive())
- {
- Map<StoreRef, LuceneIndexer> indexers = (Map<StoreRef, LuceneIndexer>) AlfrescoTransactionSupport.getResource(indexersKey);
- if (indexers != null)
- {
- LuceneIndexer indexer = indexers.get(storeRef);
- if (indexer != null)
- {
- return indexer.getDeltaId();
- }
- }
- }
- return null;
- }
-
- /**
- * Encapsulate creating an indexer
- *
- * @param storeRef StoreRef
- * @param deltaId String
- * @return - the indexer made by the concrete implementation
- */
- protected abstract LuceneIndexer createIndexer(StoreRef storeRef, String deltaId);
-
- /**
- * Encapsulate creating a searcher over the main index
- */
- public LuceneSearcher getSearcher(StoreRef storeRef, boolean searchDelta) throws SearcherException
- {
- storeRef = tenantService.getName(storeRef);
-
- String deltaId = null;
- LuceneIndexer indexer = null;
- if (searchDelta)
- {
- deltaId = getTransactionId(getTransaction(), storeRef);
- if (deltaId != null)
- {
- indexer = getIndexer(storeRef);
- }
- }
- LuceneSearcher searcher = getSearcher(storeRef, indexer);
- return searcher;
- }
-
- /**
- * Get node-based searcher (for "selectNodes / selectProperties")
- */
- protected abstract SearchService getNodeSearcher() throws SearcherException;
-
- /**
- * Get a searcher over the index and the current delta
- *
- * @param storeRef StoreRef
- * @param indexer LuceneIndexer
- * @return - the searcher made by the concrete implementation.
- * @throws SearcherException
- */
-
- protected abstract LuceneSearcher getSearcher(StoreRef storeRef, LuceneIndexer indexer) throws SearcherException;
-
- /*
- * XAResource implementation
- */
-
- public void commit(Xid xid, boolean onePhase) throws XAException
- {
- try
- {
- // TODO: Should be remembering overall state
- // TODO: Keep track of prepare responses
- Map<StoreRef, LuceneIndexer> indexers = activeIndexersInGlobalTx.get(xid);
- if (indexers == null)
- {
- if (suspendedIndexersInGlobalTx.containsKey(xid))
- {
- throw new XAException("Trying to commit indexes for a suspended transaction.");
- }
- else
- {
- // nothing to do
- return;
- }
- }
-
- if (onePhase)
- {
- if (indexers.size() == 0)
- {
- return;
- }
- else if (indexers.size() == 1)
- {
- for (LuceneIndexer indexer : indexers.values())
- {
- indexer.commit();
- }
- return;
- }
- else
- {
- throw new XAException("Trying to do one phase commit on more than one index");
- }
- }
- else
- // two phase
- {
- for (LuceneIndexer indexer : indexers.values())
- {
- indexer.commit();
- }
- return;
- }
- }
- finally
- {
- activeIndexersInGlobalTx.remove(xid);
- }
- }
-
- public void end(Xid xid, int flag) throws XAException
- {
- Map<StoreRef, LuceneIndexer> indexers = activeIndexersInGlobalTx.get(xid);
- if (indexers == null)
- {
- if (suspendedIndexersInGlobalTx.containsKey(xid))
- {
- throw new XAException("Trying to commit indexes for a suspended transaction.");
- }
- else
- {
- // nothing to do
- return;
- }
- }
- if (flag == XAResource.TMSUSPEND)
- {
- activeIndexersInGlobalTx.remove(xid);
- suspendedIndexersInGlobalTx.put(xid, indexers);
- }
- else if (flag == TMFAIL)
- {
- activeIndexersInGlobalTx.remove(xid);
- suspendedIndexersInGlobalTx.remove(xid);
- }
- else if (flag == TMSUCCESS)
- {
- activeIndexersInGlobalTx.remove(xid);
- }
- }
-
- public void forget(Xid xid) throws XAException
- {
- activeIndexersInGlobalTx.remove(xid);
- suspendedIndexersInGlobalTx.remove(xid);
- }
-
- public int getTransactionTimeout() throws XAException
- {
- return timeout;
- }
-
- public boolean isSameRM(XAResource xar) throws XAException
- {
- return (xar instanceof AbstractLuceneIndexerAndSearcherFactory);
- }
-
- public int prepare(Xid xid) throws XAException
- {
- // TODO: Track state OK, ReadOnly, Exception (=> rolled back?)
- Map<StoreRef, LuceneIndexer> indexers = activeIndexersInGlobalTx.get(xid);
- if (indexers == null)
- {
- if (suspendedIndexersInGlobalTx.containsKey(xid))
- {
- throw new XAException("Trying to commit indexes for a suspended transaction.");
- }
- else
- {
- // nothing to do
- return XAResource.XA_OK;
- }
- }
- boolean isPrepared = true;
- boolean isModified = false;
- for (LuceneIndexer indexer : indexers.values())
- {
- try
- {
- isModified |= indexer.isModified();
- indexer.prepare();
- }
- catch (IndexerException e)
- {
- isPrepared = false;
- }
- }
- if (isPrepared)
- {
- if (isModified)
- {
- return XAResource.XA_OK;
- }
- else
- {
- return XAResource.XA_RDONLY;
- }
- }
- else
- {
- throw new XAException("Failed to prepare: requires rollback");
- }
- }
-
- public Xid[] recover(int arg0) throws XAException
- {
- // We can not rely on being able to recover at the moment
- // Avoiding for performance benefits at the moment
- // Assume roll back and no recovery - in the worst case we get an unused
- // delta
- // This should be there to avoid recovery of partial commits.
- // It is difficult to see how we can mandate the same conditions.
- return new Xid[0];
- }
-
- public void rollback(Xid xid) throws XAException
- {
- // TODO: What to do if all do not roll back?
- try
- {
- Map<StoreRef, LuceneIndexer> indexers = activeIndexersInGlobalTx.get(xid);
- if (indexers == null)
- {
- if (suspendedIndexersInGlobalTx.containsKey(xid))
- {
- throw new XAException("Trying to commit indexes for a suspended transaction.");
- }
- else
- {
- // nothing to do
- return;
- }
- }
- for (LuceneIndexer indexer : indexers.values())
- {
- indexer.rollback();
- }
- }
- finally
- {
- activeIndexersInGlobalTx.remove(xid);
- }
- }
-
- public boolean setTransactionTimeout(int timeout) throws XAException
- {
- this.timeout = timeout;
- return true;
- }
-
- public void start(Xid xid, int flag) throws XAException
- {
- Map<StoreRef, LuceneIndexer> active = activeIndexersInGlobalTx.get(xid);
- Map<StoreRef, LuceneIndexer> suspended = suspendedIndexersInGlobalTx.get(xid);
- if (flag == XAResource.TMJOIN)
- {
- // must be active
- if ((active != null) && (suspended == null))
- {
- return;
- }
- else
- {
- throw new XAException("Trying to rejoin transaction in an invalid state");
- }
-
- }
- else if (flag == XAResource.TMRESUME)
- {
- // must be suspended
- if ((active == null) && (suspended != null))
- {
- suspendedIndexersInGlobalTx.remove(xid);
- activeIndexersInGlobalTx.put(xid, suspended);
- return;
- }
- else
- {
- throw new XAException("Trying to rejoin transaction in an invalid state");
- }
-
- }
- else if (flag == XAResource.TMNOFLAGS)
- {
- if ((active == null) && (suspended == null))
- {
- return;
- }
- else
- {
- throw new XAException("Trying to start an existing or suspended transaction");
- }
- }
- else
- {
- throw new XAException("Unknown flags for start " + flag);
- }
-
- }
-
- /*
- * Thread local support for transactions
- */
-
- /**
- * Commit the transaction
- */
-
- @SuppressWarnings("unchecked")
- public void commit() throws IndexerException
- {
- Map<StoreRef, LuceneIndexer> indexers = null;
- try
- {
- indexers = (Map<StoreRef, LuceneIndexer>) AlfrescoTransactionSupport.getResource(indexersKey);
- if (indexers != null)
- {
- for (LuceneIndexer indexer : indexers.values())
- {
- if (destroyed && Thread.currentThread().isDaemon())
- {
- rollback();
- throw new IndexerException("Destroyed ..");
- }
- else
- {
- try
- {
- indexer.commit();
- }
- catch (IndexerException e)
- {
- rollback();
- throw e;
- }
- }
- }
- }
- }
- finally
- {
- if (indexers != null)
- {
- indexers.clear();
- AlfrescoTransactionSupport.unbindResource(indexersKey);
- }
- }
- }
-
- /**
- * Prepare the transaction TODO: Store prepare results
- *
- * @return - the tx code
- */
- @SuppressWarnings("unchecked")
- public int prepare() throws IndexerException
- {
- boolean isPrepared = true;
- boolean isModified = false;
- Map<StoreRef, LuceneIndexer> indexers = (Map<StoreRef, LuceneIndexer>) AlfrescoTransactionSupport.getResource(indexersKey);
- if (indexers != null)
- {
- for (LuceneIndexer indexer : indexers.values())
- {
- try
- {
- isModified |= indexer.isModified();
- indexer.prepare();
- }
- catch (IndexerException e)
- {
- isPrepared = false;
- throw new IndexerException("Failed to prepare: requires rollback", e);
- }
- }
- }
- if (isPrepared)
- {
- if (isModified)
- {
- return XAResource.XA_OK;
- }
- else
- {
- return XAResource.XA_RDONLY;
- }
- }
- else
- {
- throw new IndexerException("Failed to prepare: requires rollback");
- }
- }
-
- /**
- * Roll back the transaction
- */
- @SuppressWarnings("unchecked")
- public void rollback()
- {
- Map<StoreRef, LuceneIndexer> indexers = (Map<StoreRef, LuceneIndexer>) AlfrescoTransactionSupport.getResource(indexersKey);
-
- if (indexers != null)
- {
- for (LuceneIndexer indexer : indexers.values())
- {
- try
- {
- indexer.rollback();
- }
- catch (IndexerException e)
- {
-
- }
- }
- indexers.clear();
- AlfrescoTransactionSupport.unbindResource(indexersKey);
- }
- }
-
- @SuppressWarnings("unchecked")
- public void flush()
- {
- // TODO: Needs fixing if we expose the indexer in JTA
- Map<StoreRef, LuceneIndexer> indexers = (Map<StoreRef, LuceneIndexer>) AlfrescoTransactionSupport.getResource(indexersKey);
-
- if (indexers != null)
- {
- for (LuceneIndexer indexer : indexers.values())
- {
- indexer.flushPending();
- }
- }
- }
-
- @Override
- public String getIndexRootLocation()
- {
- return indexRootLocation;
- }
-
- @Override
- public int getIndexerBatchSize()
- {
- return indexerBatchSize;
- }
-
- /**
- * Set the batch size to use for background indexing
- *
- * @param indexerBatchSize int
- */
- @Override
- public void setIndexerBatchSize(int indexerBatchSize)
- {
- this.indexerBatchSize = indexerBatchSize;
- }
-
- /**
- * Get the directory where any lock files are written (by default there are none)
- *
- * @return - the path to the directory
- */
- public String getLockDirectory()
- {
- return lockDirectory;
- }
-
- public void setLockDirectory(String lockDirectory)
- {
- this.lockDirectory = lockDirectory;
- // Set the lucene lock file via System property
- // org.apache.lucene.lockDir
- System.setProperty("org.apache.lucene.lockDir", lockDirectory);
- // Make sure the lock directory exists
- File lockDir = new File(lockDirectory);
- if (!lockDir.exists())
- {
- lockDir.mkdirs();
- }
- // clean out any existing locks when we start up
-
- File[] children = lockDir.listFiles();
- if (children != null)
- {
- for (int i = 0; i < children.length; i++)
- {
- File child = children[i];
- if (child.isFile())
- {
- if (child.exists() && !child.delete() && child.exists())
- {
- throw new IllegalStateException("Failed to delete " + child);
- }
- }
- }
- }
- }
-
- @Override
- public int getQueryMaxClauses()
- {
- return queryMaxClauses;
- }
-
- /**
- * Set the maximum number of clauses in a Lucene boolean query
- *
- * @param queryMaxClauses int
- */
- @Override
- public void setQueryMaxClauses(int queryMaxClauses)
- {
- this.queryMaxClauses = queryMaxClauses;
- BooleanQuery.setMaxClauseCount(this.queryMaxClauses);
- }
-
- /**
- * Set the lucene write lock timeout
- *
- * @param timeout long
- */
- @Override
- public void setWriteLockTimeout(long timeout)
- {
- this.writeLockTimeout = timeout;
- }
-
- /**
- * Set the lucene commit lock timeout (no longer used with lucene 2.1)
- *
- * @param timeout long
- */
- @Override
- public void setCommitLockTimeout(long timeout)
- {
- this.commitLockTimeout = timeout;
- }
-
- /**
- * Get the commit lock timeout.
- *
- * @return - the timeout
- */
- @Override
- public long getCommitLockTimeout()
- {
- return commitLockTimeout;
- }
-
- /**
- * Get the write lock timeout
- *
- * @return - the timeout in ms
- */
- @Override
- public long getWriteLockTimeout()
- {
- return writeLockTimeout;
- }
-
- /**
- * Set the lock poll interval in ms
- *
- * @param time long
- */
- @Override
- public void setLockPollInterval(long time)
- {
- Lock.LOCK_POLL_INTERVAL = time;
- }
-
- /**
- * Get the max number of tokens in the field
- *
- * @return - the max tokens considered.
- */
- @Override
- public int getIndexerMaxFieldLength()
- {
- return indexerMaxFieldLength;
- }
-
- /**
- * Set the max field length.
- *
- * @param indexerMaxFieldLength int
- */
- @Override
- public void setIndexerMaxFieldLength(int indexerMaxFieldLength)
- {
- this.indexerMaxFieldLength = indexerMaxFieldLength;
- }
-
- public ThreadPoolExecutor getThreadPoolExecutor()
- {
- return this.threadPoolExecutor;
- }
-
- public void setThreadPoolExecutor(ThreadPoolExecutor threadPoolExecutor)
- {
- this.threadPoolExecutor = threadPoolExecutor;
- }
-
- /**
- * @return the useInMemorySort
- */
- public boolean getUseInMemorySort()
- {
- return useInMemorySort;
- }
-
- /**
- * @param useInMemorySort the useInMemorySort to set
- */
- public void setUseInMemorySort(boolean useInMemorySort)
- {
- this.useInMemorySort = useInMemorySort;
- }
-
- /**
- * @return the maxRawResultSetSizeForInMemorySort
- */
- public int getMaxRawResultSetSizeForInMemorySort()
- {
- return maxRawResultSetSizeForInMemorySort;
- }
-
- /**
- * @param maxRawResultSetSizeForInMemorySort the maxRawResultSetSizeForInMemorySort to set
- */
- public void setMaxRawResultSetSizeForInMemorySort(int maxRawResultSetSizeForInMemorySort)
- {
- this.maxRawResultSetSizeForInMemorySort = maxRawResultSetSizeForInMemorySort;
- }
-
- /**
- * This component is able to safely perform backups of the Lucene indexes while the server is running.
- *
- * It can be run directly by calling the {@link #backup() } method, but the convenience {@link LuceneIndexBackupJob}
- * can be used to call it as well.
- *
- * @author Derek Hulley
- */
- public static class LuceneIndexBackupComponent /* implements InitializingBean */
- {
- ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
-
- boolean executing = false;
-
- private static String BACKUP_TEMP_NAME = ".indexbackup_temp";
-
- private TransactionService transactionService;
-
- private Set<LuceneIndexerAndSearcher> factories;
-
- private NodeService nodeService;
-
- private String targetLocation;
-
- private boolean checkConfiguration = true;
-
- /**
- * Default constructor
- */
- public LuceneIndexBackupComponent()
- {
- }
-
- /**
- * If false do not check the index configuration.
- *
- * @param checkConfiguration boolean
- */
- public void setCheckConfiguration(boolean checkConfiguration)
- {
- this.checkConfiguration = checkConfiguration;
- }
-
- /**
- * Provides transactions in which to perform the work
- *
- * @param transactionService TransactionService
- */
- public void setTransactionService(TransactionService transactionService)
- {
- this.transactionService = transactionService;
- }
-
- /**
- * Set the Lucene index factory that will be used to control the index locks
- *
- * @param factories
- * the index factories
- */
- public void setFactories(Set<LuceneIndexerAndSearcher> factories)
- {
- this.factories = factories;
- }
-
- /**
- * Used to retrieve the stores
- *
- * @param nodeService
- * the node service
- */
- public void setNodeService(NodeService nodeService)
- {
- this.nodeService = nodeService;
- }
-
- /**
- * Set the directory to which the backup will be copied
- *
- * @param targetLocation
- * the backup directory
- */
- public void setTargetLocation(String targetLocation)
- {
- this.targetLocation = targetLocation;
- }
-
- /**
- * Backup the Lucene indexes
- */
- public void backup()
- {
- rwLock.readLock().lock();
- try
- {
- if (executing)
- {
- return;
- }
- }
- finally
- {
- rwLock.readLock().unlock();
- }
-
- rwLock.writeLock().lock();
- try
- {
- if (executing)
- {
- return;
- }
- executing = true;
- }
- finally
- {
- rwLock.writeLock().unlock();
- }
-
- try
- {
- RetryingTransactionCallback<Object> backupWork = new RetryingTransactionCallback<Object>()
- {
- public Object execute() throws Exception
- {
- backupImpl();
- return null;
- }
- };
- transactionService.getRetryingTransactionHelper().doInTransaction(backupWork);
- }
- finally
- {
- rwLock.writeLock().lock();
- try
- {
- executing = false;
- }
- finally
- {
- rwLock.writeLock().unlock();
- }
- }
- }
-
- private void backupImpl()
- {
- // create the location to copy to
- File targetDir = new File(targetLocation);
- if (targetDir.exists() && !targetDir.isDirectory())
- {
- throw new AlfrescoRuntimeException("Target location is a file and not a directory: " + targetDir);
- }
- File targetParentDir = targetDir.getParentFile();
- if (targetParentDir == null)
- {
- throw new AlfrescoRuntimeException("Target location may not be a root directory: " + targetDir);
- }
- File tempDir = new File(targetParentDir, BACKUP_TEMP_NAME);
-
- for (LuceneIndexerAndSearcher factory : factories)
- {
- ReadOnlyWork<Object> backupWork = new BackUpReadOnlyWork(factory, tempDir, targetDir);
-
- if (logger.isDebugEnabled())
- {
- logger.debug("Backing up Lucene indexes: \n" + " Target directory: " + targetDir);
- }
-
- factory.doReadOnly(backupWork);
-
- if (logger.isDebugEnabled())
- {
- logger.debug("Backed up Lucene indexes: \n" + " Target directory: " + targetDir);
- }
- }
- }
-
- static class BackUpReadOnlyWork implements ReadOnlyWork<Object>
- {
- LuceneIndexerAndSearcher factory;
-
- File tempDir;
-
- File targetDir;
-
- BackUpReadOnlyWork(LuceneIndexerAndSearcher factory, File tempDir, File targetDir)
- {
- this.factory = factory;
- this.tempDir = tempDir;
- this.targetDir = targetDir;
- }
-
- public Object doWork()
- {
- try
- {
- File indexRootDir = new File(factory.getIndexRootLocation());
- // perform the copy
- backupDirectory(indexRootDir, tempDir, targetDir);
- return null;
- }
- catch (Throwable e)
- {
- throw new AlfrescoRuntimeException("Failed to copy Lucene index root: \n"
- + " Index root: " + factory.getIndexRootLocation() + "\n" + " Target: " + targetDir, e);
- }
- }
-
- /**
- * Makes a backup of the source directory via a temporary folder.
- */
- private void backupDirectory(File sourceDir, File tempDir, File targetDir) throws Exception
- {
- if (!sourceDir.exists())
- {
- // there is nothing to copy
- return;
- }
- // delete the files from the temp directory
- if (tempDir.exists())
- {
- deleteDirectory(tempDir);
- if (tempDir.exists())
- {
- throw new AlfrescoRuntimeException("Temp directory exists and cannot be deleted: " + tempDir);
- }
- }
- // copy to the temp directory
- copyDirectory(sourceDir, tempDir, true);
- // check that the temp directory was created
- if (!tempDir.exists())
- {
- throw new AlfrescoRuntimeException("Copy to temp location failed");
- }
- // delete the target directory
- deleteDirectory(targetDir);
- if (targetDir.exists())
- {
- throw new AlfrescoRuntimeException("Failed to delete older files from target location");
- }
- // rename the temp to be the target
- tempDir.renameTo(targetDir);
- // make sure the rename worked
- if (!targetDir.exists())
- {
- throw new AlfrescoRuntimeException("Failed to rename temporary directory to target backup directory");
- }
- }
-
- /**
- * Note: files can alter due to background processes, so file-not-found is OK.
- *
- * @param srcDir File
- * @param destDir File
- * @param preserveFileDate boolean
- * @throws IOException
- */
- private void copyDirectory(File srcDir, File destDir, boolean preserveFileDate) throws IOException
- {
- if (destDir.exists())
- {
- throw new IOException("Destination should be created from clean");
- }
- else
- {
- if (!destDir.mkdirs())
- {
- throw new IOException("Destination '" + destDir + "' directory cannot be created");
- }
- if (preserveFileDate)
- {
- // OK if the file is not found, so there is no need to check
- destDir.setLastModified(srcDir.lastModified());
- }
- }
- if (!destDir.canWrite())
- {
- throw new IOException("No access to destination directory " + destDir);
- }
-
- File[] files = srcDir.listFiles();
- if (files != null)
- {
- for (int i = 0; i < files.length; i++)
- {
- File currentCopyTarget = new File(destDir, files[i].getName());
- if (files[i].isDirectory())
- {
- // Skip any temp index file
- if (files[i].getName().equals(tempDir.getName()))
- {
- // skip any temp back up directories
- }
- else if (files[i].getName().equals(targetDir.getName()))
- {
- // skip any back up directories
- }
- else
- {
- copyDirectory(files[i], currentCopyTarget, preserveFileDate);
- }
- }
- else
- {
- copyFile(files[i], currentCopyTarget, preserveFileDate);
- }
- }
- }
- else
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Skipping transient directory " + srcDir);
- }
- }
- }
-
- private void copyFile(File srcFile, File destFile, boolean preserveFileDate) throws IOException
- {
- try
- {
- if (destFile.exists())
- {
- throw new IOException("File should not exist " + destFile);
- }
-
- FileInputStream input = new FileInputStream(srcFile);
- try
- {
- FileOutputStream output = new FileOutputStream(destFile);
- try
- {
- copy(input, output);
- }
- finally
- {
- try
- {
- output.close();
- }
- catch (IOException io)
- {
-
- }
- }
- }
- finally
- {
- try
- {
- input.close();
- }
- catch (IOException io)
- {
-
- }
- }
-
- // check copy
- if (srcFile.length() != destFile.length())
- {
- throw new IOException("Failed to copy full contents from '" + srcFile + "' to '" + destFile + "'");
- }
- if (preserveFileDate)
- {
- destFile.setLastModified(srcFile.lastModified());
- }
- }
- catch (FileNotFoundException fnfe)
- {
- // ignore as files can go
- if (logger.isDebugEnabled())
- {
- logger.debug("Skipping transient file " + srcFile);
- }
- }
- }
-
- public int copy(InputStream input, OutputStream output) throws IOException
- {
- byte[] buffer = new byte[2048 * 4];
- int count = 0;
- int n = 0;
- while ((n = input.read(buffer)) != -1)
- {
- output.write(buffer, 0, n);
- count += n;
- }
- return count;
- }
-
- public void deleteDirectory(File directory) throws IOException
- {
- if (!directory.exists())
- {
- return;
- }
- if (!directory.isDirectory())
- {
- throw new IllegalArgumentException("Not a directory " + directory);
- }
-
- File[] files = directory.listFiles();
- if (files == null)
- {
- throw new IOException("Failed to delete directory - no access: " + directory);
- }
-
- for (int i = 0; i < files.length; i++)
- {
- File file = files[i];
-
- if (file.isDirectory())
- {
- deleteDirectory(file);
- }
- else
- {
- if (!file.delete())
- {
- throw new IOException("Unable to delete file: " + file);
- }
- }
- }
-
- if (!directory.delete())
- {
- throw new IOException("Unable to delete directory " + directory);
- }
- }
-
- }
-
- public void afterPropertiesSetXXX() throws Exception
- {
- RetryingTransactionCallback<Object> backupWork = new RetryingTransactionCallback<Object>()
- {
- public Object execute() throws Exception
- {
- File targetDir = new File(targetLocation).getCanonicalFile();
-
- List<StoreRef> stores;
- try
- {
- stores = nodeService.getStores();
- }
- catch (Exception e)
- {
- return null;
- }
- Set<String> protocols = new HashSet<String>();
- protocols.add(StoreRef.PROTOCOL_ARCHIVE);
- protocols.add(StoreRef.PROTOCOL_WORKSPACE);
- protocols.add("locks");
- for (StoreRef store : stores)
- {
- protocols.add(store.getProtocol());
- }
-
- for (LuceneIndexerAndSearcher factory : factories)
- {
- File indexRootDir = new File(factory.getIndexRootLocation()).getCanonicalFile();
-
- if (indexRootDir.getCanonicalPath().startsWith(targetDir.getCanonicalPath()))
- {
- throw new IllegalArgumentException("Backup directory can not contain or be an index directory");
- }
- if (targetDir.getCanonicalPath().startsWith(indexRootDir.getCanonicalPath()))
- {
- for (String name : protocols)
- {
- File test = new File(indexRootDir, name);
- if (targetDir.getCanonicalPath().startsWith(test.getCanonicalPath()))
- {
- throw new IllegalArgumentException("Backup directory can not be in index directory and match a store protocol name " + targetDir);
- }
- }
- }
- // if the back up directory exists make sure it only contains directories that are store
- // protocols
-
- if (targetDir.exists())
- {
- for (File file : targetDir.listFiles())
- {
- if (file.isFile())
- {
- throw new IllegalArgumentException("Existing index backup does not look like the expected structure. It contains a file "
- + file.getCanonicalPath());
- }
- if (!protocols.contains(file.getName()))
- {
- throw new IllegalArgumentException(
- "Existing index backup does not look like the expected structure. It contains a directory with a name that does not match a store protocol "
- + file.getCanonicalPath());
-
- }
- }
- }
-
- }
- return null;
- }
- };
-
- if (checkConfiguration)
- {
- transactionService.getRetryingTransactionHelper().doInTransaction(backupWork, true);
- }
-
- }
- }
-
- /**
- * Job that uses the {@link LuceneIndexBackupComponent} to perform safe backups of the Lucene indexes.
- *
- * @author Derek Hulley
- */
- public static class LuceneIndexBackupJob implements Job
- {
-
- /** KEY_LUCENE_INDEX_BACKUP_COMPONENT = 'luceneIndexBackupComponent' */
- public static final String KEY_LUCENE_INDEX_BACKUP_COMPONENT = "luceneIndexBackupComponent";
-
- /**
- * Locks the Lucene indexes and copies them to a backup location
- */
- public void execute(JobExecutionContext context) throws JobExecutionException
- {
- JobDataMap jobData = context.getJobDetail().getJobDataMap();
- LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData.get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
- if (backupComponent == null)
- {
- throw new JobExecutionException("Missing job data: " + KEY_LUCENE_INDEX_BACKUP_COMPONENT);
- }
- // perform the backup
- backupComponent.backup();
- }
- }
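For context, the job reads its collaborator from the Quartz JobDataMap, so scheduling it by hand would look roughly like the sketch below (Quartz 1.x API of the period; the names, cron expression, and scheduler variable are illustrative assumptions, and in the repository this wiring is done through Spring configuration):

    // Illustrative wiring only; the CronTrigger constructor throws ParseException
    JobDetail jobDetail = new JobDetail("luceneIndexBackup", Scheduler.DEFAULT_GROUP, LuceneIndexBackupJob.class);
    jobDetail.getJobDataMap().put(LuceneIndexBackupJob.KEY_LUCENE_INDEX_BACKUP_COMPONENT, backupComponent);
    Trigger trigger = new CronTrigger("luceneIndexBackupTrigger", Scheduler.DEFAULT_GROUP, "0 0 3 * * ?");
    scheduler.scheduleJob(jobDetail, trigger);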
-
- @Override
- public MLAnalysisMode getDefaultMLIndexAnalysisMode()
- {
- return defaultMLIndexAnalysisMode;
- }
-
- /**
- * Set the ML analysis mode at index time.
- *
- * @param mode MLAnalysisMode
- */
- @Override
- public void setDefaultMLIndexAnalysisMode(MLAnalysisMode mode)
- {
- // defaultMLIndexAnalysisMode = MLAnalysisMode.getMLAnalysisMode(mode);
- defaultMLIndexAnalysisMode = mode;
- }
-
- @Override
- public MLAnalysisMode getDefaultMLSearchAnalysisMode()
- {
- return defaultMLSearchAnalysisMode;
- }
-
- /**
- * Set the ML analysis mode at search time
- *
- * @param mode MLAnalysisMode
- */
- @Override
- public void setDefaultMLSearchAnalysisMode(MLAnalysisMode mode)
- {
- // defaultMLSearchAnalysisMode = MLAnalysisMode.getMLAnalysisMode(mode);
- defaultMLSearchAnalysisMode = mode;
- }
-
- @Override
- public int getMaxDocIdCacheSize()
- {
- return maxDocIdCacheSize;
- }
-
- @Override
- public void setMaxDocIdCacheSize(int maxDocIdCacheSize)
- {
- this.maxDocIdCacheSize = maxDocIdCacheSize;
- }
-
- @Override
- public int getMaxDocsForInMemoryMerge()
- {
- return maxDocsForInMemoryMerge;
- }
-
- @Override
- public void setMaxDocsForInMemoryMerge(int maxDocsForInMemoryMerge)
- {
- this.maxDocsForInMemoryMerge = maxDocsForInMemoryMerge;
- }
-
- @Override
- public int getMaxDocumentCacheSize()
- {
- return maxDocumentCacheSize;
- }
-
- @Override
- public void setMaxDocumentCacheSize(int maxDocumentCacheSize)
- {
- this.maxDocumentCacheSize = maxDocumentCacheSize;
- }
-
- @Override
- public int getMaxIsCategoryCacheSize()
- {
- return maxIsCategoryCacheSize;
- }
-
- @Override
- public void setMaxIsCategoryCacheSize(int maxIsCategoryCacheSize)
- {
- this.maxIsCategoryCacheSize = maxIsCategoryCacheSize;
- }
-
- @Override
- public int getMaxLinkAspectCacheSize()
- {
- return maxLinkAspectCacheSize;
- }
-
- @Override
- public void setMaxLinkAspectCacheSize(int maxLinkAspectCacheSize)
- {
- this.maxLinkAspectCacheSize = maxLinkAspectCacheSize;
- }
-
- @Override
- public int getMaxParentCacheSize()
- {
- return maxParentCacheSize;
- }
-
- @Override
- public void setMaxParentCacheSize(int maxParentCacheSize)
- {
- this.maxParentCacheSize = maxParentCacheSize;
- }
-
- @Override
- public int getMaxPathCacheSize()
- {
- return maxPathCacheSize;
- }
-
- @Override
- public void setMaxPathCacheSize(int maxPathCacheSize)
- {
- this.maxPathCacheSize = maxPathCacheSize;
- }
-
- @Override
- public int getMaxTypeCacheSize()
- {
- return maxTypeCacheSize;
- }
-
- @Override
- public void setMaxTypeCacheSize(int maxTypeCacheSize)
- {
- this.maxTypeCacheSize = maxTypeCacheSize;
- }
-
- @Override
- public int getMergerMaxMergeDocs()
- {
- return mergerMaxMergeDocs;
- }
-
- @Override
- public void setMergerMaxMergeDocs(int mergerMaxMergeDocs)
- {
- this.mergerMaxMergeDocs = mergerMaxMergeDocs;
- }
-
- @Override
- public int getMergerMergeFactor()
- {
- return mergerMergeFactor;
- }
-
- @Override
- public void setMergerMergeFactor(int mergerMergeFactor)
- {
- this.mergerMergeFactor = mergerMergeFactor;
- }
-
- @Override
- public int getMergerMaxBufferedDocs()
- {
- return mergerMaxBufferedDocs;
- }
-
- @Override
- public void setMergerMaxBufferedDocs(int mergerMaxBufferedDocs)
- {
- this.mergerMaxBufferedDocs = mergerMaxBufferedDocs;
- }
-
- @Override
- public int getMergerTargetIndexCount()
- {
- return mergerTargetIndexCount;
- }
-
- @Override
- public void setMergerTargetIndexCount(int mergerTargetIndexCount)
- {
- this.mergerTargetIndexCount = mergerTargetIndexCount;
- }
-
- @Override
- public int getMergerTargetOverlayCount()
- {
- return mergerTargetOverlayCount;
- }
-
- @Override
- public void setMergerTargetOverlayCount(int mergerTargetOverlayCount)
- {
- this.mergerTargetOverlayCount = mergerTargetOverlayCount;
- }
-
- @Override
- public int getMergerTargetOverlaysBlockingFactor()
- {
- return mergerTargetOverlaysBlockingFactor;
- }
-
- @Override
- public void setMergerTargetOverlaysBlockingFactor(int mergerTargetOverlaysBlockingFactor)
- {
- this.mergerTargetOverlaysBlockingFactor = mergerTargetOverlaysBlockingFactor;
- }
-
- @Override
- public boolean getFairLocking()
- {
- return this.fairLocking;
- }
-
- @Override
- public void setFairLocking(boolean fairLocking)
- {
- this.fairLocking = fairLocking;
- }
-
- @Override
- public int getTermIndexInterval()
- {
- return termIndexInterval;
- }
-
- @Override
- public void setTermIndexInterval(int termIndexInterval)
- {
- this.termIndexInterval = termIndexInterval;
- }
-
- @Override
- public boolean getUseNioMemoryMapping()
- {
- return useNioMemoryMapping;
- }
-
- @Override
- public void setUseNioMemoryMapping(boolean useNioMemoryMapping)
- {
- this.useNioMemoryMapping = useNioMemoryMapping;
- }
-
- @Override
- public int getWriterMaxMergeDocs()
- {
- return writerMaxMergeDocs;
- }
-
- @Override
- public void setWriterMaxMergeDocs(int writerMaxMergeDocs)
- {
- this.writerMaxMergeDocs = writerMaxMergeDocs;
- }
-
- @Override
- public int getWriterMergeFactor()
- {
- return writerMergeFactor;
- }
-
- @Override
- public void setWriterMergeFactor(int writerMergeFactor)
- {
- this.writerMergeFactor = writerMergeFactor;
- }
-
- @Override
- public int getWriterMaxBufferedDocs()
- {
- return writerMaxBufferedDocs;
- }
-
- @Override
- public void setWriterMaxBufferedDocs(int writerMaxBufferedDocs)
- {
- this.writerMaxBufferedDocs = writerMaxBufferedDocs;
- }
-
- @Override
- public boolean isCacheEnabled()
- {
- return cacheEnabled;
- }
-
- @Override
- public void setCacheEnabled(boolean cacheEnabled)
- {
- this.cacheEnabled = cacheEnabled;
- }
-
- @Override
- public boolean getPostSortDateTime()
- {
- return postSortDateTime;
- }
-
- @Override
- public void setPostSortDateTime(boolean postSortDateTime)
- {
- this.postSortDateTime = postSortDateTime;
- }
-
- /**
- * @return the maxDocsForInMemoryIndex
- */
- @Override
- public int getMaxDocsForInMemoryIndex()
- {
- return maxDocsForInMemoryIndex;
- }
-
- /**
- * @param maxDocsForInMemoryIndex
- * the maxDocsForInMemoryIndex to set
- */
- @Override
- public void setMaxDocsForInMemoryIndex(int maxDocsForInMemoryIndex)
- {
- this.maxDocsForInMemoryIndex = maxDocsForInMemoryIndex;
- }
-
- /**
- * @return the maxRamInMbForInMemoryMerge
- */
- @Override
- public double getMaxRamInMbForInMemoryMerge()
- {
- return maxRamInMbForInMemoryMerge;
- }
-
- /**
- * @param maxRamInMbForInMemoryMerge
- * the maxRamInMbForInMemoryMerge to set
- */
- @Override
- public void setMaxRamInMbForInMemoryMerge(double maxRamInMbForInMemoryMerge)
- {
- this.maxRamInMbForInMemoryMerge = maxRamInMbForInMemoryMerge;
- }
-
- /**
- * @return the maxRamInMbForInMemoryIndex
- */
- @Override
- public double getMaxRamInMbForInMemoryIndex()
- {
- return maxRamInMbForInMemoryIndex;
- }
-
- /**
- * @param maxRamInMbForInMemoryIndex
- * the maxRamInMbForInMemoryIndex to set
- */
- @Override
- public void setMaxRamInMbForInMemoryIndex(double maxRamInMbForInMemoryIndex)
- {
- this.maxRamInMbForInMemoryIndex = maxRamInMbForInMemoryIndex;
- }
-
- /**
- * @return the mergerRamBufferSizeMb
- */
- @Override
- public double getMergerRamBufferSizeMb()
- {
- return mergerRamBufferSizeMb;
- }
-
- /**
- * @param mergerRamBufferSizeMb
- * the mergerRamBufferSizeMb to set
- */
- @Override
- public void setMergerRamBufferSizeMb(double mergerRamBufferSizeMb)
- {
- this.mergerRamBufferSizeMb = mergerRamBufferSizeMb;
- }
-
- /**
- * @return the writerRamBufferSizeMb
- */
- @Override
- public double getWriterRamBufferSizeMb()
- {
- return writerRamBufferSizeMb;
- }
-
- /**
- * @param writerRamBufferSizeMb
- * the writerRamBufferSizeMb to set
- */
- @Override
- public void setWriterRamBufferSizeMb(double writerRamBufferSizeMb)
- {
- this.writerRamBufferSizeMb = writerRamBufferSizeMb;
- }
-
-
-
-
- @Override
- public boolean isContentIndexingEnabled()
- {
- return contentIndexingEnabled;
- }
-
- @Override
- public void setContentIndexingEnabled(boolean contentIndexingEnabled)
- {
- this.contentIndexingEnabled = contentIndexingEnabled;
-
- }
-
- protected LuceneQueryLanguageSPI getQueryLanguage(String name)
- {
- return getQueryLanguages().get(name);
- }
-
- protected abstract List<StoreRef> getAllStores();
-
- public <R> R doReadOnly(ReadOnlyWork<R> lockWork)
- {
- // get all the available stores
- List<StoreRef> storeRefs = getAllStores();
-
- IndexInfo.LockWork<R> currentLockWork = null;
-
- for (int i = storeRefs.size() - 1; i >= 0; i--)
- {
- StoreRef currentStore = storeRefs.get(i);
-
- if (currentLockWork == null)
- {
- currentLockWork = new CoreReadOnlyWork<R>(getIndexer(currentStore), lockWork);
- }
- else
- {
- currentLockWork = new NestingReadOnlyWork<R>(getIndexer(currentStore), currentLockWork);
- }
- }
-
- if (currentLockWork != null)
- {
- try
- {
- return currentLockWork.doWork();
- }
- catch (Throwable exception)
- {
-
- // Re-throw the exception
- if (exception instanceof RuntimeException)
- {
- throw (RuntimeException) exception;
- }
- else
- {
- throw new RuntimeException("Error during run with lock.", exception);
- }
- }
-
- }
- else
- {
- return null;
- }
- }
-
- private static class NestingReadOnlyWork<R> implements IndexInfo.LockWork<R>
- {
- IndexInfo.LockWork<R> lockWork;
-
- LuceneIndexer indexer;
-
- NestingReadOnlyWork(LuceneIndexer indexer, IndexInfo.LockWork<R> lockWork)
- {
- this.indexer = indexer;
- this.lockWork = lockWork;
- }
-
- public R doWork() throws Exception
- {
- return indexer.doReadOnly(lockWork);
- }
-
- public boolean canRetry()
- {
- return false;
- }
- }
-
- private static class CoreReadOnlyWork<R> implements IndexInfo.LockWork<R>
- {
- ReadOnlyWork<R> lockWork;
-
- LuceneIndexer indexer;
-
- CoreReadOnlyWork(LuceneIndexer indexer, ReadOnlyWork<R> lockWork)
- {
- this.indexer = indexer;
- this.lockWork = lockWork;
- }
-
- public R doWork() throws Exception
- {
- return indexer.doReadOnly(new IndexInfo.LockWork<R>()
- {
- public R doWork()
- {
- try
- {
- return lockWork.doWork();
- }
- catch (Throwable exception)
- {
-
- // Re-throw the exception
- if (exception instanceof RuntimeException)
- {
- throw (RuntimeException) exception;
- }
- else
- {
- throw new RuntimeException("Error during run with lock.", exception);
- }
- }
- }
-
- public boolean canRetry()
- {
- return false;
- }
- });
- }
-
- public boolean canRetry()
- {
- return false;
- }
- }
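Taken together, the loop in doReadOnly and these two wrappers build a chain in which every store's index is taken read-only before the caller's work runs. For three stores the nesting is effectively:

    // Pseudocode of the chain for stores [s0, s1, s2] (illustrative):
    // getIndexer(s0).doReadOnly(
    //     getIndexer(s1).doReadOnly(
    //         getIndexer(s2).doReadOnly(userWork)))
    // The innermost CoreReadOnlyWork invokes the caller's ReadOnlyWork only
    // once every index in the chain is held.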
-
- public static void main(String[] args) throws IOException
- {
- // delete a directory ....
- if (args.length != 1)
- {
- return;
- }
- File file = new File(args[0]);
- deleteDirectory(file);
- }
-
- public static void deleteDirectory(File directory) throws IOException
- {
- if (!directory.exists())
- {
- return;
- }
- if (!directory.isDirectory())
- {
- throw new IllegalArgumentException("Not a directory " + directory);
- }
-
- File[] files = directory.listFiles();
- if (files == null)
- {
- throw new IOException("Failed to delete directory - no access: " + directory);
- }
-
- for (int i = 0; i < files.length; i++)
- {
- File file = files[i];
-
- System.out.println(".");
- // System.out.println("Deleting "+file.getCanonicalPath());
- if (file.isDirectory())
- {
- deleteDirectory(file);
- }
- else
- {
- if (!file.delete())
- {
- throw new IOException("Unable to delete file: " + file);
- }
- }
- }
-
- if (!directory.delete())
- {
- throw new IOException("Unable to delete directory " + directory);
- }
- }
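For comparison, the same recursive delete can be expressed with the NIO.2 file-visitor API introduced in Java 7 (a sketch only; the API postdates this class and the method name is hypothetical):

    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;

    static void deleteDirectoryNio(File directory) throws IOException
    {
        Files.walkFileTree(directory.toPath(), new SimpleFileVisitor<Path>()
        {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException
            {
                Files.delete(file); // delete each regular file as it is visited
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException
            {
                Files.delete(dir); // delete the directory once its contents are gone
                return FileVisitResult.CONTINUE;
            }
        });
    }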
-
- @Override
- public void destroy() throws Exception
- {
- IndexInfo.destroy();
- destroyed = true;
- }
-
-
-}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java b/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java
deleted file mode 100644
index b39897ecfb..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Set;
-
-import javax.transaction.Status;
-import javax.transaction.xa.XAResource;
-
-import org.alfresco.repo.node.NodeBulkLoader;
-import org.alfresco.repo.search.Indexer;
-import org.alfresco.repo.search.IndexerException;
-import org.alfresco.repo.search.impl.lucene.index.TransactionStatus;
-import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
-import org.alfresco.service.cmr.repository.InvalidNodeRefException;
-import org.alfresco.service.transaction.TransactionService;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.springframework.dao.ConcurrencyFailureException;
-
-/**
- * Common support for indexing across implementations
- *
- * @author andyh
- * @param <T> -
- * the type used to generate the key in the index file
- */
-public abstract class AbstractLuceneIndexerImpl<T> extends AbstractLuceneBase implements Indexer
-{
- /**
- * Enum for indexing actions against a node
- */
- protected enum Action
- {
- /**
- * An index
- */
- INDEX,
- /**
- * A reindex
- */
- REINDEX,
- /**
- * A delete
- */
- DELETE,
- /**
- * A cascaded reindex (ensures directory structure is ok)
- */
- CASCADEREINDEX
- }
-
- protected enum IndexUpdateStatus
- {
- /**
- * Index is unchanged
- */
- UNMODIFIED,
- /**
- * Index is being changed in TX
- */
- SYNCRONOUS,
- /**
- * Index is being changed by a background update
- */
- ASYNCHRONOUS;
- }
-
- protected enum FTSStatus {New, Dirty, Clean};
-
- protected long docs;
-
- // An indexer with read through activated can only see already-committed documents in the database. Useful when
- // reindexing lots of old documents and not wanting to pollute the caches with stale versions of nodes.
- private boolean isReadThrough;
-
- protected TransactionService transactionService;
- protected NodeBulkLoader bulkLoader;
-
- public void setReadThrough(boolean isReadThrough)
- {
- this.isReadThrough = isReadThrough;
- }
-
- public void setTransactionService(TransactionService transactionService)
- {
- this.transactionService = transactionService;
- }
-
- /**
- * @param bulkLoader object to provide node loading options
- */
- public void setBulkLoader(NodeBulkLoader bulkLoader)
- {
- this.bulkLoader = bulkLoader;
- }
-
- protected static class Command<S>
- {
- S ref;
-
- Action action;
-
- Command(S ref, Action action)
- {
- this.ref = ref;
- this.action = action;
- }
-
- public String toString()
- {
- StringBuffer buffer = new StringBuffer();
- if (action == Action.INDEX)
- {
- buffer.append("Index ");
- }
- else if (action == Action.DELETE)
- {
- buffer.append("Delete ");
- }
- else if (action == Action.REINDEX)
- {
- buffer.append("Reindex ");
- }
- else
- {
- buffer.append("Unknown ... ");
- }
- buffer.append(ref);
- return buffer.toString();
- }
-
- }
-
- /**
- * No transform available
- */
- public static final String NOT_INDEXED_NO_TRANSFORMATION = "nint";
-
- /**
- * Transform failed
- */
- public static final String NOT_INDEXED_TRANSFORMATION_FAILED = "nitf";
-
- /**
- * No content
- */
- public static final String NOT_INDEXED_CONTENT_MISSING = "nicm";
-
- /**
- * No type conversion
- */
- public static final String NOT_INDEXED_NO_TYPE_CONVERSION = "nintc";
-
- /**
- * Logger
- */
- private static Log s_logger = LogFactory.getLog(AbstractLuceneIndexerImpl.class);
-
- protected static Set<String> deletePrimary(Collection<String> nodeRefs, IndexReader reader, boolean delete)
- throws LuceneIndexException
- {
-
- Set<String> refs = new LinkedHashSet<String>();
-
- for (String nodeRef : nodeRefs)
- {
-
- try
- {
- TermDocs td = reader.termDocs(new Term("PRIMARYPARENT", nodeRef));
- while (td.next())
- {
- int doc = td.doc();
- Document document = reader.document(doc);
- String[] ids = document.getValues("ID");
- refs.add(ids[ids.length - 1]);
- if (delete)
- {
- reader.deleteDocument(doc);
- }
- }
- td.close();
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("Failed to delete node by primary parent for " + nodeRef, e);
- }
- }
-
- return refs;
-
- }
-
- protected static Set<String> deleteReference(Collection<String> nodeRefs, IndexReader reader, boolean delete)
- throws LuceneIndexException
- {
-
- Set<String> refs = new LinkedHashSet<String>();
-
- for (String nodeRef : nodeRefs)
- {
-
- try
- {
- TermDocs td = reader.termDocs(new Term("PARENT", nodeRef));
- while (td.next())
- {
- int doc = td.doc();
- Document document = reader.document(doc);
- String[] ids = document.getValues("ID");
- refs.add(ids[ids.length - 1]);
- if (delete)
- {
- reader.deleteDocument(doc);
- }
- }
- td.close();
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("Failed to delete node by parent for " + nodeRef, e);
- }
- }
-
- return refs;
-
- }
-
- protected static Set<String> deleteContainerAndBelow(String nodeRef, IndexReader reader, boolean delete,
- boolean cascade) throws LuceneIndexException
- {
- Set<String> refs = new LinkedHashSet<String>();
-
- try
- {
- if (delete)
- {
- reader.deleteDocuments(new Term("ID", nodeRef));
- }
- refs.add(nodeRef);
- if (cascade)
- {
- TermDocs td = reader.termDocs(new Term("ANCESTOR", nodeRef));
- while (td.next())
- {
- int doc = td.doc();
- Document document = reader.document(doc);
- String[] ids = document.getValues("ID");
- refs.add(ids[ids.length - 1]);
- if (delete)
- {
- reader.deleteDocument(doc);
- }
- }
- td.close();
- }
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("Failed to delete container and below for " + nodeRef, e);
- }
- return refs;
- }
-
- protected boolean locateContainer(String nodeRef, IndexReader reader)
- {
- boolean found = false;
- try
- {
- TermDocs td = reader.termDocs(new Term("ID", nodeRef));
- while (td.next())
- {
- int doc = td.doc();
- Document document = reader.document(doc);
- if (document.getField("ISCONTAINER") != null)
- {
- found = true;
- break;
- }
- }
- td.close();
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("Failed to delete container and below for " + nodeRef, e);
- }
- return found;
- }
-
- /** the maximum transformation time to allow atomically, defaulting to 20ms */
- protected long maxAtomicTransformationTime = 20;
-
- /**
- * A list of all deletions we have made - at merge these deletions need to be made against the main index. TODO:
- * Consider if this information needs to be persisted for recovery
- */
- protected Set<String> deletions = new LinkedHashSet<String>();
-
- /**
- * A list of cascading container deletions we have made - at merge these deletions need to be made against the main index.
- */
- protected Set<String> containerDeletions = new LinkedHashSet<String>();
-
- /**
- * List of pending indexing commands.
- */
- protected List<Command<T>> commandList = new ArrayList<Command<T>>(10000);
-
- /**
- * Flag to indicate if we are doing an in-transaction delta or a batch update to the index. If true, we are just
- * fixing up non atomically indexed things from one or more other updates.
- */
- protected IndexUpdateStatus indexUpdateStatus = IndexUpdateStatus.UNMODIFIED;
-
- /**
- * Set the max time allowed to transform content atomically
- *
- * @param maxAtomicTransformationTime long
- */
- public void setMaxAtomicTransformationTime(long maxAtomicTransformationTime)
- {
- this.maxAtomicTransformationTime = maxAtomicTransformationTime;
- }
-
- /**
- * Utility method to check we are in the correct state to do work Also keeps track of the dirty flag.
- *
- * @throws IndexerException
- * @throws LuceneIndexException
- */
-
- protected void checkAbleToDoWork(IndexUpdateStatus indexUpdateStatus)
- {
- if (this.indexUpdateStatus == IndexUpdateStatus.UNMODIFIED)
- {
- this.indexUpdateStatus = indexUpdateStatus;
- }
- else if (this.indexUpdateStatus == indexUpdateStatus)
- {
- return;
- }
- else
- {
- throw new IndexerException("Can not mix FTS and transactional updates");
- }
-
- switch (getStatus())
- {
- case UNKNOWN:
- try
- {
- setStatus(TransactionStatus.ACTIVE);
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("Failed to set TX active", e);
- }
- break;
- case ACTIVE:
- // OK
- break;
- default:
- // All other states are a problem
- throw new IndexerException(buildErrorString());
- }
- }
-
- /**
- * Utility method to report errors about invalid state.
- *
- * @return - an error based on status
- */
- private String buildErrorString()
- {
- StringBuilder buffer = new StringBuilder(128);
- buffer.append("The indexer is unable to accept more work: ");
- switch (getStatus().getStatus())
- {
- case Status.STATUS_COMMITTED:
- buffer.append("The indexer has been committed");
- break;
- case Status.STATUS_COMMITTING:
- buffer.append("The indexer is committing");
- break;
- case Status.STATUS_MARKED_ROLLBACK:
- buffer.append("The indexer is marked for rollback");
- break;
- case Status.STATUS_PREPARED:
- buffer.append("The indexer is prepared to commit");
- break;
- case Status.STATUS_PREPARING:
- buffer.append("The indexer is preparing to commit");
- break;
- case Status.STATUS_ROLLEDBACK:
- buffer.append("The indexer has been rolled back");
- break;
- case Status.STATUS_ROLLING_BACK:
- buffer.append("The indexer is rolling back");
- break;
- case Status.STATUS_UNKNOWN:
- buffer.append("The indexer is in an unknown state");
- break;
- default:
- break;
- }
- return buffer.toString();
- }
-
- /**
- * Commit this index
- *
- * @throws LuceneIndexException
- */
- public void commit() throws LuceneIndexException
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Starting Commit");
- }
- switch (getStatus().getStatus())
- {
- case Status.STATUS_COMMITTING:
- throw new LuceneIndexException("Unable to commit: Transaction is committing");
- case Status.STATUS_COMMITTED:
- throw new LuceneIndexException("Unable to commit: Transaction is committed");
- case Status.STATUS_ROLLING_BACK:
- throw new LuceneIndexException("Unable to commit: Transaction is rolling back");
- case Status.STATUS_ROLLEDBACK:
- throw new LuceneIndexException("Unable to commit: Transaction is already rolled back");
- case Status.STATUS_MARKED_ROLLBACK:
- throw new LuceneIndexException("Unable to commit: Transaction is marked for roll back");
- case Status.STATUS_PREPARING:
- throw new LuceneIndexException("Unable to commit: Transaction is preparing");
- case Status.STATUS_ACTIVE:
- // special case - commit from active
- prepare();
- // drop through to do the commit;
- default:
- if (getStatus().getStatus() != Status.STATUS_PREPARED)
- {
- throw new LuceneIndexException("Index must be prepared to commit");
- }
- try
- {
- setStatus(TransactionStatus.COMMITTING);
- if (isModified())
- {
- doCommit();
- }
- setStatus(TransactionStatus.COMMITTED);
- }
- catch (LuceneIndexException e)
- {
- // If anything goes wrong we try and do a roll back
- rollback();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Commit Failed", e);
- }
- throw new LuceneIndexException("Commit failed", e);
- }
- catch (Throwable t)
- {
- // If anything goes wrong we try and do a roll back
- rollback();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Commit Failed", t);
- }
- throw new LuceneIndexException("Commit failed", t);
- }
- finally
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Ending Commit");
- }
-
- // Make sure we tidy up
- // deleteDelta();
- }
- break;
- }
- }
-
- /**
- * Prepare to commit. At the moment this makes sure we have all the locks. TODO: This is not doing proper
- * serialisation against the index as would a database transaction.
- *
- * @return the tx state
- * @throws LuceneIndexException
- */
- public int prepare() throws LuceneIndexException
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Starting Prepare");
- }
- switch (getStatus().getStatus())
- {
- case Status.STATUS_COMMITTING:
- throw new IndexerException("Unable to prepare: Transaction is committing");
- case Status.STATUS_COMMITTED:
- throw new IndexerException("Unable to prepare: Transaction is committed");
- case Status.STATUS_ROLLING_BACK:
- throw new IndexerException("Unable to prepare: Transaction is rolling back");
- case Status.STATUS_ROLLEDBACK:
- throw new IndexerException("Unable to prepare: Transaction is already rolled back");
- case Status.STATUS_MARKED_ROLLBACK:
- throw new IndexerException("Unable to prepare: Transaction is marked for roll back");
- case Status.STATUS_PREPARING:
- throw new IndexerException("Unable to prepare: Transaction is already preparing");
- case Status.STATUS_PREPARED:
- throw new IndexerException("Unable to prepare: Transaction is already prepared");
- default:
- try
- {
- setStatus(TransactionStatus.PREPARING);
- if (isModified())
- {
- doPrepare();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Waiting to Finish Preparing");
- }
- }
- setStatus(TransactionStatus.PREPARED);
- return isModified() ? XAResource.XA_OK : XAResource.XA_RDONLY;
- }
- catch (LuceneIndexException e)
- {
- setRollbackOnly();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Prepare Failed", e);
- }
- throw new LuceneIndexException("Index failed to prepare", e);
- }
- catch (Throwable t)
- {
- // If anything goes wrong we try and do a roll back
- rollback();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Prepare Failed", t);
- }
- throw new LuceneIndexException("Prepare failed", t);
- }
- finally
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(Thread.currentThread().getName() + " Ending Prepare");
- }
- }
- }
- }
-
- /**
- * Has this index been modified?
- *
- * @return true if modified
- */
- public boolean isModified()
- {
- return indexUpdateStatus != IndexUpdateStatus.UNMODIFIED;
- }
-
- /**
- * Roll back the index changes (this just means they are never added)
- *
- * @throws LuceneIndexException
- */
- public void rollback() throws LuceneIndexException
- {
- switch (getStatus().getStatus())
- {
-
- case Status.STATUS_COMMITTED:
- throw new IndexerException("Unable to roll back: Transaction is committed ");
- case Status.STATUS_ROLLING_BACK:
- throw new IndexerException("Unable to roll back: Transaction is rolling back");
- case Status.STATUS_ROLLEDBACK:
- throw new IndexerException("Unable to roll back: Transaction is already rolled back");
- case Status.STATUS_COMMITTING:
- // Can roll back during commit
- default:
- try
- {
- setStatus(TransactionStatus.ROLLINGBACK);
- doRollBack();
- setStatus(TransactionStatus.ROLLEDBACK);
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("rollback failed ", e);
- }
- break;
- }
- }
-
- /**
- * Mark this index for roll back only. This action can not be reversed. It will reject all other work and only allow
- * roll back.
- */
- public void setRollbackOnly()
- {
- switch (getStatus().getStatus())
- {
- case Status.STATUS_COMMITTING:
- throw new IndexerException("Unable to mark for rollback: Transaction is committing");
- case Status.STATUS_COMMITTED:
- throw new IndexerException("Unable to mark for rollback: Transaction is committed");
- default:
- try
- {
- doSetRollbackOnly();
- setStatus(TransactionStatus.MARKED_ROLLBACK);
- }
- catch (IOException e)
- {
- throw new LuceneIndexException("Set rollback only failed ", e);
- }
- break;
- }
- }
-
- protected abstract void doPrepare() throws IOException;
-
- protected abstract void doCommit() throws IOException;
-
- protected abstract void doRollBack() throws IOException;
-
- protected abstract void doSetRollbackOnly() throws IOException;
-
- protected <T2> T2 doInReadthroughTransaction(final RetryingTransactionCallback<T2> callback)
- {
- if (isReadThrough)
- {
- return transactionService.getRetryingTransactionHelper().doInTransaction(
- new RetryingTransactionCallback<T2>()
- {
- @Override
- public T2 execute() throws Throwable
- {
- // ALF-18383: Regression in Lucene indexing performance in 4.x
- // We accept the loss of some performance in order to ensure accuracy
- // Request clean node data
- if (bulkLoader != null)
- {
- bulkLoader.setCheckNodeConsistency();
- }
- try
- {
- return callback.execute();
- }
- catch (InvalidNodeRefException e)
- {
- // Turn InvalidNodeRefExceptions into retryable exceptions.
- throw new ConcurrencyFailureException(
- "Possible cache integrity issue during reindexing", e);
- }
-
- }
- }, true, true);
- }
- else
- {
- try
- {
- return callback.execute();
- }
- catch (RuntimeException e)
- {
- throw e;
- }
- catch (Error e)
- {
- throw e;
- }
- catch (Throwable e)
- {
- throw new RuntimeException(e);
- }
- }
- }
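A minimal usage sketch for this helper (the callback body, the nodeRef variable, and the createLuceneDocument helper are hypothetical):

    Document doc = doInReadthroughTransaction(new RetryingTransactionCallback<Document>()
    {
        @Override
        public Document execute() throws Throwable
        {
            // Runs read-through, so clean node data is requested from the database
            return createLuceneDocument(nodeRef); // hypothetical helper
        }
    });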
-
- protected void index(T ref) throws LuceneIndexException
- {
- addCommand(new Command<T>(ref, Action.INDEX));
- }
-
- protected void reindex(T ref, boolean cascadeReindexDirectories) throws LuceneIndexException
- {
- addCommand(new Command<T>(ref, cascadeReindexDirectories ? Action.CASCADEREINDEX : Action.REINDEX));
- }
-
- protected void delete(T ref) throws LuceneIndexException
- {
- addCommand(new Command<T>(ref, Action.DELETE));
- }
-
- private void addCommand(Command<T> command)
- {
- if (commandList.size() > 0)
- {
- Command<T> last = commandList.get(commandList.size() - 1);
- if ((last.action == command.action) && (last.ref.equals(command.ref)))
- {
- return;
- }
- }
- purgeCommandList(command);
- commandList.add(command);
-
- if (commandList.size() > getLuceneConfig().getIndexerBatchSize())
- {
- flushPending();
- }
- }
-
- private void purgeCommandList(Command<T> command)
- {
- removeFromCommandList(command, command.action != Action.DELETE);
- }
-
- private void removeFromCommandList(Command<T> command, boolean matchExact)
- {
- for (ListIterator<Command<T>> it = commandList.listIterator(commandList.size()); it.hasPrevious(); /**/)
- {
- Command<T> current = it.previous();
- if (matchExact)
- {
- if (current.ref.equals(command.ref))
- {
- if ((current.action == command.action))
- {
- it.remove();
- return;
- }
- // If there is an INDEX in this same transaction and the current command is a reindex, remove it and
- // replace the current command with it
- else if (command.action != Action.DELETE && current.action == Action.INDEX)
- {
- it.remove();
- command.action = Action.INDEX;
- }
- }
- }
- else
- {
- if (current.ref.equals(command.ref))
- {
- it.remove();
- }
- }
- }
- }
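To make the coalescing rules above concrete, a worked trace for a single reference (illustrative):

    // index(ref):          nothing to purge                        -> [INDEX ref]
    // reindex(ref, false): the pending INDEX is removed and the
    //                      incoming REINDEX is downgraded to INDEX -> [INDEX ref]
    // delete(ref):         matchExact is false, so every pending
    //                      command for ref is purged first         -> [DELETE ref]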
-
- /**
- * Get the deletions
- *
- * @return - the ids to delete
- */
- public Set<String> getDeletions()
- {
- return Collections.unmodifiableSet(deletions);
- }
-
- /**
- * Get the container deletions
- *
- * @return - the ids to delete
- */
- public Set<String> getContainerDeletions()
- {
- return Collections.unmodifiableSet(containerDeletions);
- }
-}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/ClosingIndexSearcher.java b/src/main/java/org/alfresco/repo/search/impl/lucene/ClosingIndexSearcher.java
deleted file mode 100644
index 4f8976290c..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/ClosingIndexSearcher.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.IOException;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.Directory;
-
-public class ClosingIndexSearcher extends IndexSearcher
-{
- IndexReader reader;
-
- public ClosingIndexSearcher(String path) throws IOException
- {
- super(path);
- }
-
- public ClosingIndexSearcher(Directory directory) throws IOException
- {
- super(directory);
- }
-
- public ClosingIndexSearcher(IndexReader r)
- {
- super(r);
- this.reader = r;
- }
-
- /*package*/ IndexReader getReader()
- {
- return reader;
- }
-
- @Override
- public void close() throws IOException
- {
- super.close();
- if(reader != null)
- {
- reader.close();
- }
- }
-
-}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java b/src/main/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java
deleted file mode 100644
index fdb098ab59..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.alfresco.error.AlfrescoRuntimeException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.FilterIndexReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.TermPositions;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.OpenBitSet;
-
-
-/**
- * An index reader that filters documents from another.
- *
- * @author andyh
- *
- */
-public class FilterIndexReaderByStringId extends FilterIndexReader
-{
- private static Log s_logger = LogFactory.getLog(FilterIndexReaderByStringId.class);
-
- private OpenBitSet deletedDocuments;
- private final Set<String> deletions;
- private final Set<String> containerDeletions;
- private final boolean deleteNodesOnly;
- private final ReadWriteLock lock = new ReentrantReadWriteLock();
-
- private final String id;
-
- /**
- * Apply the filter
- *
- * @param id String
- * @param reader IndexReader
- * @param deleteNodesOnly boolean
- */
- public FilterIndexReaderByStringId(String id, IndexReader reader, Set<String> deletions, Set<String> containerDeletions, boolean deleteNodesOnly)
- {
- super(reader);
- reader.incRef();
- this.id = id;
- this.deletions = deletions;
- this.containerDeletions = containerDeletions;
- this.deleteNodesOnly = deleteNodesOnly;
-
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Applying deletions FOR "+id +" (the index into which these are applied is the previous one ...)");
- }
-
- }
-
- public OpenBitSet getDeletedDocuments()
- {
- lock.readLock().lock();
- try
- {
- if (deletedDocuments != null)
- {
- return deletedDocuments;
- }
- }
- finally
- {
- lock.readLock().unlock();
- }
- lock.writeLock().lock();
- try
- {
- if (deletedDocuments != null)
- {
- return deletedDocuments;
- }
- deletedDocuments = new OpenBitSet(in.maxDoc());
-
- Searcher searcher = new IndexSearcher(in);
- for (String stringRef : deletions)
- {
- if (!deleteNodesOnly || containerDeletions.contains(stringRef))
- {
- TermDocs td = in.termDocs(new Term("ID", stringRef));
- while (td.next())
- {
- deletedDocuments.set(td.doc());
- }
- td.close();
- }
- else
- {
- boolean found = false;
- TermDocs td = in.termDocs(new Term("LEAFID", stringRef));
- while (td.next())
- {
- deletedDocuments.set(td.doc());
- found = true;
- }
- td.close();
- // For backward compatibility, use old method of locating non-container docs
- if (!found)
- {
- TermQuery query = new TermQuery(new Term("ID", stringRef));
- Hits hits = searcher.search(query);
- if (hits.length() > 0)
- {
- for (int i = 0; i < hits.length(); i++)
- {
- Document doc = hits.doc(i);
- // Exclude all containers except the root (which is also a node!)
- Field path = doc.getField("PATH");
- if (path == null || path.stringValue().length() == 0)
- {
- deletedDocuments.set(hits.id(i));
- // There should only be one thing to delete
- // break;
- }
- }
- }
- }
- }
- }
- // searcher does not need to be closed, the reader is live
-
- for (String stringRef : containerDeletions)
- {
- TermDocs td = in.termDocs(new Term("ANCESTOR", stringRef));
- while (td.next())
- {
- deletedDocuments.set(td.doc());
- }
- td.close();
- }
- return deletedDocuments;
- }
- catch (IOException e)
- {
- s_logger.error("Error initialising "+id, e);
- throw new AlfrescoRuntimeException("Failed to find deleted documents to filter", e);
- }
- finally
- {
- lock.writeLock().unlock();
- }
-
- }
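Stripped of the Lucene detail, the method above is the standard read-lock check followed by write-lock recheck pattern for lazy initialisation. A generic sketch (lock, cached, and buildValue are placeholder names):

    lock.readLock().lock();
    try
    {
        if (cached != null) return cached; // fast path: already built
    }
    finally
    {
        lock.readLock().unlock();
    }
    lock.writeLock().lock();
    try
    {
        if (cached != null) return cached; // another thread won the race
        cached = buildValue();             // compute exactly once
        return cached;
    }
    finally
    {
        lock.writeLock().unlock();
    }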
-
- // Prevent from actually setting the closed flag
- @Override
- protected void doClose() throws IOException
- {
- this.in.decRef();
- }
-
- /**
- * Filter implementation
- *
- * @author andyh
- *
- */
- public class FilterTermDocs implements TermDocs
- {
- protected TermDocs in;
-
- String id;
-
- /**
- * @param id String
- * @param in TermDocs
- */
- public FilterTermDocs(String id, TermDocs in)
- {
- this.in = in;
- }
-
- public void seek(Term term) throws IOException
- {
- // Seek is left to the base implementation
- in.seek(term);
- }
-
- public void seek(TermEnum termEnum) throws IOException
- {
- // Seek is left to the base implementation
- in.seek(termEnum);
- }
-
- public int doc()
- {
- // The current document info is valid in the base implementation
- return in.doc();
- }
-
- public int freq()
- {
- // The frequency is valid in the base implementation
- return in.freq();
- }
-
- public boolean next() throws IOException
- {
- try
- {
- if (!in.next())
- {
- return false;
- }
- OpenBitSet deletedDocuments = getDeletedDocuments();
- while (deletedDocuments.get(in.doc()))
- {
- if (!in.next())
- {
- return false;
- }
- }
- // Not masked
- return true;
- }
- catch(IOException ioe)
- {
- s_logger.error("Error reading docs for "+id);
- throw ioe;
- }
- }
-
- public int read(int[] docs, int[] freqs) throws IOException
- {
- int[] innerDocs = new int[docs.length];
- int[] innerFreq = new int[docs.length];
- int count = in.read(innerDocs, innerFreq);
-
- // Is the stream exhausted
- if (count == 0)
- {
- return 0;
- }
-
- OpenBitSet deletedDocuments = getDeletedDocuments();
- while (allDeleted(innerDocs, count, deletedDocuments))
- {
-
- count = in.read(innerDocs, innerFreq);
-
- // Is the stream exhausted
- if (count == 0)
- {
- return 0;
- }
- }
-
- // Add non deleted
-
- int insertPosition = 0;
- for (int i = 0; i < count; i++)
- {
- if (!deletedDocuments.get(innerDocs[i]))
- {
- docs[insertPosition] = innerDocs[i];
- freqs[insertPosition] = innerFreq[i];
- insertPosition++;
- }
- }
-
- return insertPosition;
- }
-
- private boolean allDeleted(int[] docs, int fillSize, OpenBitSet deletedDocuments)
- {
- for (int i = 0; i < fillSize; i++)
- {
- if (!deletedDocuments.get(docs[i]))
- {
- return false;
- }
- }
- return true;
- }
-
- public boolean skipTo(int i) throws IOException
- {
- if (!in.skipTo(i))
- {
- return false;
- }
-
- OpenBitSet deletedDocuments = getDeletedDocuments();
- while (deletedDocuments.get(in.doc()))
- {
- if (!in.next())
- {
- return false;
- }
- }
- return true;
- }
-
- public void close() throws IOException
- {
- // Leave to internal implementation
- in.close();
- }
- }
-
- /** Base class for filtering {@code TermPositions} implementations. */
- public class FilterTermPositions extends FilterTermDocs implements TermPositions
- {
-
- TermPositions tp;
-
- /**
- * @param id String
- * @param in TermPositions
- */
- public FilterTermPositions(String id, TermPositions in)
- {
- super(id, in);
- tp = in;
- }
-
- public int nextPosition() throws IOException
- {
- return tp.nextPosition();
- }
-
- public byte[] getPayload(byte[] data, int offset) throws IOException
- {
- return tp.getPayload(data, offset);
- }
-
- public int getPayloadLength()
- {
- return tp.getPayloadLength();
- }
-
- public boolean isPayloadAvailable()
- {
- return tp.isPayloadAvailable();
- }
- }
-
- @Override
- public int numDocs()
- {
- return super.numDocs() - (int)getDeletedDocuments().cardinality();
- }
-
- @Override
- public TermDocs termDocs() throws IOException
- {
- return new FilterTermDocs(id, super.termDocs());
- }
-
- @Override
- public TermPositions termPositions() throws IOException
- {
- return new FilterTermPositions(id, super.termPositions());
- }
-}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneCategoryServiceImpl.java b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneCategoryServiceImpl.java
index c73d9433fa..b1ab08a356 100644
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneCategoryServiceImpl.java
+++ b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneCategoryServiceImpl.java
@@ -1,28 +1,28 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
package org.alfresco.repo.search.impl.lucene;
import java.util.Collection;
@@ -81,8 +81,8 @@ public class LuceneCategoryServiceImpl implements CategoryService
protected DictionaryService dictionaryService;
- protected IndexerAndSearcher indexerAndSearcher;
-
+ protected IndexerAndSearcher indexerAndSearcher;
+
protected int queryFetchSize = 5000;
/**
@@ -153,12 +153,12 @@ public void setDictionaryService(DictionaryService dictionaryService)
public void setIndexerAndSearcher(IndexerAndSearcher indexerAndSearcher)
{
this.indexerAndSearcher = indexerAndSearcher;
- }
+ }
- public void setQueryFetchSize(int queryFetchSize) {
- this.queryFetchSize = queryFetchSize;
- }
-
+ public void setQueryFetchSize(int queryFetchSize) {
+ this.queryFetchSize = queryFetchSize;
+ }
+
public Collection getChildren(NodeRef categoryRef, Mode mode, Depth depth)
{
return getChildren(categoryRef, mode, depth, false, null, queryFetchSize);
@@ -543,68 +543,7 @@ public void deleteClassification(StoreRef storeRef, QName aspectName)
 public List<Pair<NodeRef, Integer>> getTopCategories(StoreRef storeRef, QName aspectName, int count)
{
- if (indexerAndSearcher instanceof LuceneIndexerAndSearcher)
- {
- AspectDefinition definition = dictionaryService.getAspect(aspectName);
- if(definition == null)
- {
- throw new IllegalStateException("Unknown aspect");
- }
- QName catProperty = null;
- Map<QName, PropertyDefinition> properties = definition.getProperties();
- for(QName pName : properties.keySet())
- {
- if(pName.getNamespaceURI().equals(aspectName.getNamespaceURI()))
- {
- if(pName.getLocalName().equalsIgnoreCase(aspectName.getLocalName()))
- {
- PropertyDefinition def = properties.get(pName);
- if(def.getDataType().getName().equals(DataTypeDefinition.CATEGORY))
- {
- catProperty = pName;
- }
- }
- }
- }
- if(catProperty == null)
- {
- throw new IllegalStateException("Aspect does not have category property mirroring the aspect name");
- }
-
-
- LuceneIndexerAndSearcher lias = (LuceneIndexerAndSearcher) indexerAndSearcher;
- String field = "@" + catProperty;
- SearchService searchService = lias.getSearcher(storeRef, false);
- if (searchService instanceof LuceneSearcher)
- {
- LuceneSearcher luceneSearcher = (LuceneSearcher)searchService;
- List<Pair<String, Integer>> topTerms = luceneSearcher.getTopTerms(field, count);
- List<Pair<NodeRef, Integer>> answer = new LinkedList<Pair<NodeRef, Integer>>();
- for (Pair<String, Integer> term : topTerms)
- {
- Pair<NodeRef, Integer> toAdd;
- NodeRef nodeRef = new NodeRef(term.getFirst());
- if (nodeService.exists(nodeRef))
- {
- toAdd = new Pair<NodeRef, Integer>(nodeRef, term.getSecond());
- }
- else
- {
- toAdd = new Pair<NodeRef, Integer>(null, term.getSecond());
- }
- answer.add(toAdd);
- }
- return answer;
- }
- else
- {
- throw new UnsupportedOperationException("getPopularCategories is only supported for Lucene indexes");
- }
- }
- else
- {
- throw new UnsupportedOperationException("getPopularCategories is only supported for Lucene indexes");
- }
+ throw new UnsupportedOperationException();
}
}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java
deleted file mode 100644
index 9eeb05e9e6..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.util.Set;
-
-import org.alfresco.repo.search.Indexer;
-import org.alfresco.repo.search.TransactionSynchronisationAwareIndexer;
-import org.alfresco.repo.search.impl.lucene.index.IndexInfo;
-
-/**
- * @author Andy Hind
- */
-public interface LuceneIndexer extends Indexer, TransactionSynchronisationAwareIndexer
-{
- public String getDeltaId();
- public Set<String> getDeletions();
- public Set<String> getContainerDeletions();
- public boolean getDeleteOnlyNodes();
- public <R> R doReadOnly(IndexInfo.LockWork<R> lockWork);
-}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSet.java b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSet.java
deleted file mode 100644
index 53b706b398..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSet.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-
-import org.alfresco.error.AlfrescoRuntimeException;
-import org.alfresco.repo.node.NodeBulkLoader;
-import org.alfresco.repo.search.AbstractResultSet;
-import org.alfresco.repo.search.ResultSetRowIterator;
-import org.alfresco.repo.search.SearcherException;
-import org.alfresco.repo.search.SimpleResultSetMetaData;
-import org.alfresco.repo.search.impl.lucene.index.CachingIndexReader;
-import org.alfresco.repo.tenant.TenantService;
-import org.alfresco.service.cmr.repository.ChildAssociationRef;
-import org.alfresco.service.cmr.repository.NodeRef;
-import org.alfresco.service.cmr.repository.NodeService;
-import org.alfresco.service.cmr.search.LimitBy;
-import org.alfresco.service.cmr.search.PermissionEvaluationMode;
-import org.alfresco.service.cmr.search.ResultSetMetaData;
-import org.alfresco.service.cmr.search.ResultSetRow;
-import org.alfresco.service.cmr.search.SearchParameters;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.Searcher;
-
-/**
- * Implementation of a ResultSet on top of Lucene Hits class.
- *
- * @author andyh
- */
-public class LuceneResultSet extends AbstractResultSet
-{
- private static int DEFAULT_BULK_FETCH_SIZE = 1000;
-
- /**
- * The underlying hits
- */
- Hits hits;
-
- private Searcher searcher;
-
- private NodeService nodeService;
-
- private TenantService tenantService;
-
- private SearchParameters searchParameters;
-
- private LuceneConfig config;
-
- private BitSet prefetch;
-
- private boolean bulkFetch = true;
-
- private int bulkFetchSize = DEFAULT_BULK_FETCH_SIZE;
-
- /**
- * Wrap a Lucene search result with node support
- *
- * @param hits Hits
- * @param searcher Searcher
- * @param nodeService nodeService
- * @param tenantService tenant service
- * @param searchParameters SearchParameters
- * @param config - lucene config
- */
- public LuceneResultSet(Hits hits, Searcher searcher, NodeService nodeService, TenantService tenantService, SearchParameters searchParameters,
- LuceneConfig config)
- {
- super();
- this.hits = hits;
- this.searcher = searcher;
- this.nodeService = nodeService;
- this.tenantService = tenantService;
- this.searchParameters = searchParameters;
- this.config = config;
- prefetch = new BitSet(hits.length());
- }
-
- /*
- * ResultSet implementation
- */
-
- public ResultSetRowIterator iterator()
- {
- return new LuceneResultSetRowIterator(this);
- }
-
- public int length()
- {
- return hits.length();
- }
-
- public NodeRef getNodeRef(int n)
- {
- try
- {
- prefetch(n);
- // We have to get the document to resolve this
- // It is possible the store ref is also stored in the index
- if (searcher instanceof ClosingIndexSearcher)
- {
- ClosingIndexSearcher cis = (ClosingIndexSearcher) searcher;
- IndexReader reader = cis.getReader();
- if (reader instanceof CachingIndexReader)
- {
- int id = hits.id(n);
- CachingIndexReader cir = (CachingIndexReader) reader;
- String sid = cir.getId(id);
- return tenantService.getBaseName(new NodeRef(sid));
- }
- }
-
- Document doc = hits.doc(n);
- String id = doc.get("ID");
- return tenantService.getBaseName(new NodeRef(id));
- }
- catch (IOException e)
- {
- throw new SearcherException("IO Error reading node ref from the result set", e);
- }
- }
-
- public float getScore(int n) throws SearcherException
- {
- try
- {
- return hits.score(n);
- }
- catch (IOException e)
- {
- throw new SearcherException("IO Error reading score from the result set", e);
- }
- }
-
- public Document getDocument(int n)
- {
- try
- {
- prefetch(n);
- Document doc = hits.doc(n);
- return doc;
- }
- catch (IOException e)
- {
- throw new SearcherException("IO Error reading document from the result set", e);
- }
- }
-
- private void prefetch(int n) throws IOException
- {
- NodeBulkLoader bulkLoader = config.getBulkLoader();
- if (!getBulkFetch() || (bulkLoader == null))
- {
- // No prefetching
- return;
- }
- if (prefetch.get(n))
- {
- // The document was already processed
- return;
- }
- // Start at 'n' and process the next bulk set
- int bulkFetchSize = getBulkFetchSize();
- List<NodeRef> fetchList = new ArrayList<NodeRef>(bulkFetchSize);
- int totalHits = hits.length();
- for (int i = 0; i < bulkFetchSize; i++)
- {
- int next = n + i;
- if (next >= totalHits)
- {
- // We've hit the end
- break;
- }
- if (prefetch.get(next))
- {
- // This one is in there already
- continue;
- }
- // We store the node and mark it as prefetched
- prefetch.set(next);
- Document doc = hits.doc(next);
- String nodeRefStr = doc.get("ID");
- try
- {
- NodeRef nodeRef = tenantService.getBaseName(new NodeRef(nodeRefStr));
- fetchList.add(nodeRef);
- }
- catch (AlfrescoRuntimeException e)
- {
- // Ignore IDs that don't parse as NodeRefs, e.g. FTSREF docs
- }
- }
- // Now bulk fetch
- if (fetchList.size() > 1)
- {
- bulkLoader.cacheNodes(fetchList);
- }
- }
-
- public void close()
- {
- try
- {
- searcher.close();
- }
- catch (IOException e)
- {
- throw new SearcherException(e);
- }
- }
-
- public NodeService getNodeService()
- {
- return nodeService;
- }
-
- public ResultSetRow getRow(int i)
- {
- if (i < length())
- {
- return new LuceneResultSetRow(this, i);
- }
- else
- {
- throw new SearcherException("Invalid row");
- }
- }
-
- public ChildAssociationRef getChildAssocRef(int n)
- {
- return tenantService.getBaseName(getRow(n).getChildAssocRef());
- }
-
- public ResultSetMetaData getResultSetMetaData()
- {
- return new SimpleResultSetMetaData(LimitBy.UNLIMITED, PermissionEvaluationMode.EAGER, searchParameters);
- }
-
- public int getStart()
- {
- throw new UnsupportedOperationException();
- }
-
- public boolean hasMore()
- {
- throw new UnsupportedOperationException();
- }
-
- public TenantService getTenantService()
- {
- return tenantService;
- }
-
- /**
- * Bulk fetch results in the cache
- *
- * @param bulkFetch boolean
- */
- @Override
- public boolean setBulkFetch(boolean bulkFetch)
- {
- boolean oldBulkFetch = this.bulkFetch;
- this.bulkFetch = bulkFetch;
- return oldBulkFetch;
- }
-
- /**
- * Do we bulk fetch
- *
- * @return - true if we do
- */
- @Override
- public boolean getBulkFetch()
- {
- return bulkFetch;
- }
-
- /**
- * Set the bulk fetch size
- *
- * @param bulkFetchSize int
- */
- @Override
- public int setBulkFetchSize(int bulkFetchSize)
- {
- int oldBulkFetchSize = this.bulkFetchSize;
- this.bulkFetchSize = bulkFetchSize;
- return oldBulkFetchSize;
- }
-
- /**
- * Get the bulk fetch size.
- *
- * @return the fetch size
- */
- @Override
- public int getBulkFetchSize()
- {
- return bulkFetchSize;
- }
-
- /**
- * @param index int
- * @return int
- */
- public int doc(int index)
- {
- try
- {
- return hits.id(index);
- }
- catch (IOException e)
- {
- throw new SearcherException(e);
- }
- }
-
- /* (non-Javadoc)
- * @see org.alfresco.service.cmr.search.ResultSetSPI#getNumberFound()
- */
- @Override
- public long getNumberFound()
- {
- return hits.length();
- }
-
-}
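
The most intricate piece of the class removed above is prefetch(int): it sweeps forward from the requested row, records each visited row in a BitSet, and hands the batch to the NodeBulkLoader so that subsequent per-row access hits warm caches. A minimal, dependency-free sketch of the same pattern (names are illustrative, not the Alfresco API):

```java
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

/** BitSet-guarded bulk prefetch, as in LuceneResultSet.prefetch(int). */
public class PrefetchingResults
{
    private final String[] ids;      // stands in for the Lucene Hits
    private final BitSet prefetched; // one bit per result row
    private final int batchSize;

    public PrefetchingResults(String[] ids, int batchSize)
    {
        this.ids = ids;
        this.prefetched = new BitSet(ids.length);
        this.batchSize = batchSize;
    }

    public String get(int n)
    {
        prefetch(n);
        return ids[n];
    }

    private void prefetch(int n)
    {
        if (prefetched.get(n))
        {
            return; // this row was already swept into an earlier batch
        }
        List<String> batch = new ArrayList<String>(batchSize);
        for (int i = 0; i < batchSize && n + i < ids.length; i++)
        {
            int next = n + i;
            if (prefetched.get(next))
            {
                continue; // already covered by a previous call
            }
            prefetched.set(next);
            batch.add(ids[next]);
        }
        if (batch.size() > 1)
        {
            warmCache(batch); // the real class calls bulkLoader.cacheNodes(fetchList)
        }
    }

    private void warmCache(List<String> batch)
    {
        // placeholder for NodeBulkLoader.cacheNodes(...)
    }
}
```

The BitSet makes repeated random access cheap: once a row has been swept into a batch it is never fetched again.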
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSetRow.java b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSetRow.java
deleted file mode 100644
index f68864c48f..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSetRow.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.io.Serializable;
-import java.util.Map;
-
-import org.alfresco.model.ContentModel;
-import org.alfresco.repo.search.AbstractResultSetRow;
-import org.alfresco.repo.tenant.TenantService;
-import org.alfresco.service.cmr.repository.ChildAssociationRef;
-import org.alfresco.service.cmr.repository.NodeRef;
-import org.alfresco.service.namespace.QName;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-/**
- * A row in a result set. Created on the fly.
- *
- * @author Andy Hind
- *
- */
-public class LuceneResultSetRow extends AbstractResultSetRow
-{
- /**
- * The current document - cached so we do not get it for each value
- */
- private Document document;
-
- private TenantService tenantService;
-
- /**
- * Wrap a position in a lucene Hits class with node support
- *
- * @param resultSet LuceneResultSet
- * @param index int
- */
- public LuceneResultSetRow(LuceneResultSet resultSet, int index)
- {
- super(resultSet, index);
-
- tenantService = resultSet.getTenantService();
- }
-
- /**
- * Support to cache the document for this row
- *
- * @return Document
- */
- public Document getDocument()
- {
- if (document == null)
- {
- document = ((LuceneResultSet) getResultSet()).getDocument(getIndex());
- }
- return document;
- }
-
- /*
- * ResultSetRow implementation
- */
-
- protected Map<QName, Serializable> getDirectProperties()
- {
- LuceneResultSet lrs = (LuceneResultSet) getResultSet();
- return lrs.getNodeService().getProperties(lrs.getNodeRef(getIndex()));
- }
-
- public QName getQName()
- {
- Field field = getDocument().getField("QNAME");
- if (field != null)
- {
- String qname = field.stringValue();
- if((qname == null) || (qname.length() == 0))
- {
- return null;
- }
- else
- {
- return QName.createQName(qname);
- }
- }
- else
- {
- return null;
- }
- }
-
- public QName getPrimaryAssocTypeQName()
- {
-
- Field field = getDocument().getField("PRIMARYASSOCTYPEQNAME");
- if (field != null)
- {
- String qname = field.stringValue();
- return QName.createQName(qname);
- }
- else
- {
- return ContentModel.ASSOC_CHILDREN;
- }
- }
-
- @Override
- public ChildAssociationRef getChildAssocRef()
- {
- Field field = getDocument().getField("PRIMARYPARENT");
- String primaryParent = null;
- if (field != null)
- {
- primaryParent = field.stringValue();
- }
- NodeRef childNodeRef = getNodeRef();
- NodeRef parentNodeRef = primaryParent == null ? null : tenantService.getBaseName(new NodeRef(primaryParent));
- return new ChildAssociationRef(getPrimaryAssocTypeQName(), parentNodeRef, getQName(), childNodeRef);
- }
-
- public NodeRef getNodeRef(String selectorName)
- {
- throw new UnsupportedOperationException();
- }
-
- public Map<String, NodeRef> getNodeRefs()
- {
- throw new UnsupportedOperationException();
- }
-
- public float getScore(String selectorName)
- {
- throw new UnsupportedOperationException();
- }
-
- public Map<String, Float> getScores()
- {
- throw new UnsupportedOperationException();
- }
-
- public int doc()
- {
- return ((LuceneResultSet)getResultSet()).doc(getIndex());
- }
-}
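
The row class above fetches its Lucene Document lazily and memoises it, so a row that is only asked for its score never touches the index store. The underlying pattern, reduced to its essentials (Row and Store are illustrative stand-ins, not Alfresco types):

```java
/** Per-row memoisation as used by LuceneResultSetRow.getDocument(). */
class Row
{
    interface Store
    {
        Object load(int index); // one expensive lookup, e.g. hits.doc(n)
    }

    private final Store store;
    private final int index;
    private Object document; // fetched at most once

    Row(Store store, int index)
    {
        this.store = store;
        this.index = index;
    }

    Object getDocument()
    {
        if (document == null)
        {
            document = store.load(index); // single I/O per row, then cached
        }
        return document;
    }
}
```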
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSetRowIterator.java b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSetRowIterator.java
deleted file mode 100644
index bc291d8d2c..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneResultSetRowIterator.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import org.alfresco.repo.search.AbstractResultSetRowIterator;
-import org.alfresco.service.cmr.search.ResultSetRow;
-
-/**
- * Iterate over the rows in a LuceneResultSet
- *
- * @author andyh
- *
- */
-public class LuceneResultSetRowIterator extends AbstractResultSetRowIterator
-{
- /**
- * Create an iterator over the result set. Follows standard ListIterator
- * conventions
- *
- * @param resultSet LuceneResultSet
- */
- public LuceneResultSetRowIterator(LuceneResultSet resultSet)
- {
- super(resultSet);
- }
-
- public ResultSetRow next()
- {
- return new LuceneResultSetRow((LuceneResultSet)getResultSet(), moveToNextPosition());
- }
-
- public ResultSetRow previous()
- {
- return new LuceneResultSetRow((LuceneResultSet)getResultSet(), moveToPreviousPosition());
- }
-}
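
For reference, the ListIterator-style convention the iterator above inherits from AbstractResultSetRowIterator boils down to a bidirectional cursor. A simplified, self-contained sketch (not the Alfresco base class):

```java
import java.util.NoSuchElementException;

/** Simplified bidirectional cursor over a fixed set of items. */
class Cursor<T>
{
    private final T[] items;
    private int position = -1; // cursor starts before the first element

    Cursor(T[] items)
    {
        this.items = items;
    }

    boolean hasNext()
    {
        return position < items.length - 1;
    }

    boolean hasPrevious()
    {
        return position > 0;
    }

    T next()
    {
        if (!hasNext()) throw new NoSuchElementException();
        return items[++position]; // cf. moveToNextPosition()
    }

    T previous()
    {
        if (!hasPrevious()) throw new NoSuchElementException();
        return items[--position]; // cf. moveToPreviousPosition()
    }
}
```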
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneSearcher.java b/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneSearcher.java
deleted file mode 100644
index d5be956bcb..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/LuceneSearcher.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene;
-
-import java.util.List;
-
-import org.alfresco.service.cmr.repository.NodeService;
-import org.alfresco.service.cmr.search.SearchService;
-import org.alfresco.service.namespace.NamespacePrefixResolver;
-import org.alfresco.util.Pair;
-
-/**
- * Lucene implementation specific extension to the searcher API
- * @author andyh
- *
- */
-public interface LuceneSearcher extends SearchService
-{
- /**
- * Check if the index exists
- * @return - true if it exists
- */
- public boolean indexExists();
- /**
- * Set the node service
- * @param nodeService NodeService
- */
- public void setNodeService(NodeService nodeService);
- /**
- * Set the namespace service
- * @param namespacePrefixResolver NamespacePrefixResolver
- */
- public void setNamespacePrefixResolver(NamespacePrefixResolver namespacePrefixResolver);
-
- /**
- * Get top terms
- *
- * @param field String
- * @param count int
- * @return List<Pair<String, Integer>>
- */
- public List<Pair<String, Integer>> getTopTerms(String field, int count);
-
- /**
- * Get a lucene searcher
- * @return ClosingIndexSearcher
- */
- public ClosingIndexSearcher getClosingIndexSearcher();
-}
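
A hypothetical consumer of the removed interface would have looked something like the sketch below; the "TEXT" field name is an assumption, and getFirst()/getSecond() are the accessors of org.alfresco.util.Pair:

```java
import java.util.List;

import org.alfresco.util.Pair;

public class TopTermsDump
{
    public void print(LuceneSearcher searcher)
    {
        if (!searcher.indexExists())
        {
            return; // nothing has been indexed yet
        }
        List<Pair<String, Integer>> terms = searcher.getTopTerms("TEXT", 10);
        for (Pair<String, Integer> term : terms)
        {
            System.out.println(term.getFirst() + " -> " + term.getSecond());
        }
    }
}
```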
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/SolrJSONResultSet.java b/src/main/java/org/alfresco/repo/search/impl/lucene/SolrJSONResultSet.java
index 1096f11ae9..76647db0d3 100644
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/SolrJSONResultSet.java
+++ b/src/main/java/org/alfresco/repo/search/impl/lucene/SolrJSONResultSet.java
@@ -37,6 +37,7 @@
import java.util.stream.Collectors;
import org.alfresco.repo.domain.node.NodeDAO;
+import org.alfresco.repo.search.QueryParserException;
import org.alfresco.repo.search.SimpleResultSetMetaData;
import org.alfresco.repo.search.impl.solr.facet.facetsresponse.GenericBucket;
import org.alfresco.repo.search.impl.solr.facet.facetsresponse.GenericFacetResponse;
@@ -176,7 +177,7 @@ public SolrJSONResultSet(JSONObject json, SearchParameters searchParameters, Nod
else
{
// No DBID found
- throw new LuceneQueryParserException("No DBID found for doc ...");
+ throw new QueryParserException("No DBID found for doc ...");
}
}
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexEntry.java b/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexEntry.java
deleted file mode 100644
index 355dc06ae8..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexEntry.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene.index;
-
-/**
- * Describes an entry in an index
- *
- * @author Andy Hind
- */
-class IndexEntry
-{
- /**
- * The type of the index entry
- */
- private IndexType type;
-
- /**
- * The unique name of the index entry
- */
- private String name;
-
- /**
- * The preceding index name.
- * Allows deltas etc. to apply to the index or an overlay, for example.
- */
- private String parentName;
-
- /**
- * The status of the index entry
- */
- private TransactionStatus status;
-
- /**
- * If merging, the id where the result is going
- */
- private String mergeId;
-
- private long documentCount;
-
- private long deletions;
-
- private boolean deletOnlyNodes;
-
- IndexEntry(IndexType type, String name, String parentName, TransactionStatus status, String mergeId, long documentCount, long deletions, boolean deletOnlyNodes)
- {
- this.type = type;
- this.name = name;
- this.parentName = parentName;
- this.status = status;
- this.mergeId = mergeId;
- this.documentCount = documentCount;
- this.deletions = deletions;
- this.deletOnlyNodes = deletOnlyNodes;
- }
-
- public String getMergeId()
- {
- return mergeId;
- }
-
- public void setMergeId(String mergeId)
- {
- this.mergeId = mergeId;
- }
-
- public String getName()
- {
- return name;
- }
-
- public void setName(String name)
- {
- this.name = name;
- }
-
- public String getParentName()
- {
- return parentName;
- }
-
- public void setParentName(String parentName)
- {
- this.parentName = parentName;
- }
-
- public TransactionStatus getStatus()
- {
- return status;
- }
-
- public void setStatus(TransactionStatus status)
- {
- this.status = status;
- }
-
- public IndexType getType()
- {
- return type;
- }
-
- public void setType(IndexType type)
- {
- this.type = type;
- }
-
- public long getDocumentCount()
- {
- return documentCount;
- }
-
- public void setDocumentCount(long documentCount)
- {
- this.documentCount = documentCount;
- }
-
- public long getDeletions()
- {
- return deletions;
- }
-
- public void setDeletions(long deletions)
- {
- this.deletions = deletions;
- }
-
- public boolean isDeletOnlyNodes()
- {
- return deletOnlyNodes;
- }
-
- public void setDeletOnlyNodes(boolean deletOnlyNodes)
- {
- this.deletOnlyNodes = deletOnlyNodes;
- }
-
- public String toString()
- {
- StringBuilder builder = new StringBuilder();
- builder.append(" Name=").append(getName()).append(" ");
- builder.append("Type=").append(getType()).append(" ");
- builder.append("Status=").append(getStatus()).append(" ");
- builder.append("Docs=").append(getDocumentCount()).append(" ");
- builder.append("Deletions=").append(getDeletions()).append(" ");
- return builder.toString();
- }
-
-
-}
\ No newline at end of file
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexEvent.java b/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexEvent.java
deleted file mode 100644
index 74e8af870d..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexEvent.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene.index;
-
-import org.springframework.context.ApplicationEvent;
-
-/**
- * A class of event that notifies the listener of a significant event relating to a Lucene index. Useful for monitoring
- * purposes.
- *
- * @author dward
- */
-public class IndexEvent extends ApplicationEvent
-{
-
- private static final long serialVersionUID = -4616231785087405506L;
-
- /** The event description. */
- private final String description;
-
- /** Its instance count. */
- private final int count;
-
- /**
- * The Constructor.
- *
- * @param source
- * the source index monitor
- * @param description
- * the event description
- * @param count
- * its instance count
- */
- public IndexEvent(IndexMonitor source, String description, int count)
- {
- super(source);
- this.description = description;
- this.count = count;
- }
-
- /**
- * Gets the source index monitor.
- *
- * @return the index monitor
- */
- public IndexMonitor getIndexMonitor()
- {
- return (IndexMonitor) getSource();
- }
-
- /**
- * Gets the event description.
- *
- * @return the description
- */
- public String getDescription()
- {
- return this.description;
- }
-
- /**
- * Gets the event instance count.
- *
- * @return the count
- */
- public int getCount()
- {
- return this.count;
- }
-
-}
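
Since IndexEvent is a plain Spring ApplicationEvent, a monitoring bean could consume it with an ordinary listener. A minimal sketch, assuming a generics-aware Spring version (3.x or later); the logging target is illustrative:

```java
import org.springframework.context.ApplicationListener;

/** Logs each index event as it is published. */
public class IndexEventLogger implements ApplicationListener<IndexEvent>
{
    public void onApplicationEvent(IndexEvent event)
    {
        System.out.println(event.getDescription() + " x" + event.getCount()
                + " from " + event.getIndexMonitor());
    }
}
```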
diff --git a/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java b/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java
deleted file mode 100644
index 8861b39dac..0000000000
--- a/src/main/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java
+++ /dev/null
@@ -1,4527 +0,0 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.repo.search.impl.lucene.index;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.io.UnsupportedEncodingException;
-import java.nio.Buffer;
-import java.nio.ByteBuffer;
-import java.nio.MappedByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.FileChannel.MapMode;
-import java.nio.channels.FileLock;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.zip.CRC32;
-
-import org.alfresco.error.AlfrescoRuntimeException;
-import org.alfresco.repo.search.IndexerException;
-import org.alfresco.repo.search.impl.lucene.FilterIndexReaderByStringId;
-import org.alfresco.repo.search.impl.lucene.LuceneConfig;
-import org.alfresco.repo.search.impl.lucene.LuceneXPathHandler;
-import org.alfresco.repo.search.impl.lucene.analysis.AlfrescoStandardAnalyser;
-import org.alfresco.repo.search.impl.lucene.query.PathQuery;
-import org.alfresco.service.cmr.dictionary.DictionaryService;
-import org.alfresco.service.namespace.NamespaceService;
-import org.alfresco.util.ApplicationContextHelper;
-import org.alfresco.util.GUID;
-import org.alfresco.util.TraceableThreadFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.FieldOption;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriter.MaxFieldLength;
-import org.apache.lucene.index.LogDocMergePolicy;
-import org.apache.lucene.index.MultiReader;
-import org.apache.lucene.index.SerialMergeScheduler;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMDirectory;
-import org.jaxen.saxpath.SAXPathException;
-import org.jaxen.saxpath.base.XPathReader;
-import org.safehaus.uuid.UUID;
-import org.springframework.context.ApplicationContext;
-import org.springframework.context.ApplicationEvent;
-import org.springframework.context.ApplicationListener;
-import org.springframework.context.ConfigurableApplicationContext;
-import org.springframework.context.event.ContextRefreshedEvent;
-
-
-/**
- * The information that makes up an index. IndexInfoVersion Repeated information of the form:
- * <ol>
- * <li>Index Type.</li>
- * <li>Sub-directory name.</li>
- * <li>Status:
- * <ul>
- * <li>Indexes, sub-indexes, and overlays must be committed. Status is ACTIVE, MERGING, COMPLETING_INDEX.</li>
- * <li>Delta: Transaction status</li>
- * <li>Overlay: Transaction status</li>
- * </ul></li>
- * </ol>
- *
- * Merges always take place to new indexes so we can detect merge failure or partial merges; otherwise we would not know
- * what had merged. Incomplete delete merging does not matter - the overlay would still exist and be treated as such. So a
- * document may be deleted in the index as well as in the applied overlay. It is still correctly deleted. NOTE: Public
- * methods lock as required, the private methods assume that the appropriate locks have been obtained. TODO: Write
- * element status into individual directories. This would be enough for recovery if both index files are lost or
- * corrupted. TODO: Tidy up index status at start up or after some time. How long would you leave a merge to run?
- *
- * The index structure is duplicated to two files. If one is corrupted the second is used.
- *
- * TODO:
- * <ul>
- * <li>make the index sharing configurable</li>
- * <li>use a thread pool for deletions, merging and index deletions</li>
- * <li>something to control the maximum number of overlays to limit the number of things layered together for searching</li>
- * <li>look at lucene locking again post 2.0, to see if it is improved</li>
- * <li>clean up old data files (that are not old index entries) - should be a config option</li>
- * </ul>
- *
- * @author Andy Hind
- */
-public class IndexInfo implements IndexMonitor
-{
- public static synchronized void destroy()
- {
- timer.cancel();
- timer = new Timer(true);
- for(IndexInfo indexInfo : indexInfos.values())
- {
- indexInfo.destroyInstance();
- }
- indexInfos.clear();
- ReferenceCountingReadOnlyIndexReaderFactory.destroy();
- }
-
- public void destroyInstance()
- {
- getWriteLock();
- try
- {
- if(mainIndexReader != null)
- {
- try
- {
- ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
- }
- catch (IOException e)
- {
- // OK, failed to close
- }
- mainIndexReader = null;
-
- for(IndexReader reader : referenceCountingReadOnlyIndexReaders.values())
- {
- ReferenceCounting referenceCounting = (ReferenceCounting) reader;
- try
- {
- referenceCounting.setInvalidForReuse();
- }
- catch (IOException e)
- {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
- }
-
- for(IndexReader reader : indexReaders.values())
- {
- try
- {
- reader.close();
- }
- catch (IOException e)
- {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
- indexReaders.clear();
-
- for(IndexWriter writer : indexWriters.values())
- {
- try
- {
- writer.close();
- }
- catch (CorruptIndexException e)
- {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- catch (IOException e)
- {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
- indexWriters.clear();
-
- if(indexInfoRAF != null)
- {
- try
- {
- indexInfoRAF.close();
- }
- catch (IOException e)
- {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
-
- if(indexInfoBackupRAF != null)
- {
- try
- {
- indexInfoBackupRAF.close();
- }
- catch (IOException e)
- {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
-
- // TODO: should set some running flag .... to abort ongoing stuff
- // at the moment it will die ungracefully ....
- }
- finally
- {
- releaseWriteLock();
- }
- }
-
- public static final String MAIN_READER = "MainReader";
-
- private static Timer timer = new Timer("IndexInfo Cleaner Daemon", true);
-
- /**
- * The logger.
- */
- private static Log s_logger = LogFactory.getLog(IndexInfo.class);
-
- /**
- * Use NIO memory mapping to write the index control file.
- */
- private static boolean useNIOMemoryMapping = true;
-
- /**
- * The default name for the file that holds the index information
- */
- private static String INDEX_INFO = "IndexInfo";
-
- /**
- * The default name for the back up file that holds the index information
- */
- private static String INDEX_INFO_BACKUP = "IndexInfoBackup";
-
- /**
- * The default name for the index deletions file
- */
- private static String INDEX_INFO_DELETIONS = "IndexInfoDeletions";
-
- /**
- * The default name for the index container deletions file
- */
- private static String INDEX_INFO_CONTAINER_DELETIONS = "IndexInfoContainerDeletions";
-
- /**
- * What to look for to detect the previous index implementation.
- */
- private static String OLD_INDEX = "index";
-
- /**
- * Is this index shared by more than one repository? We can make many lock optimisations if the index is not shared.
- */
- private boolean indexIsShared = false;
-
- /**
- * The directory that holds the index
- */
- private File indexDirectory;
-
- /**
- * The directory relative to the root path
- */
- private String relativePath;
-
- /**
- * The file holding the index information
- */
- private RandomAccessFile indexInfoRAF;
-
- /**
- * And its file channel
- */
- private FileChannel indexInfoChannel;
-
- /**
- * The file holding the backup index information.
- */
-
- private RandomAccessFile indexInfoBackupRAF;
-
- /**
- * And its file channel
- */
- private FileChannel indexInfoBackupChannel;
-
- /**
- * The file version. Negative is not yet written.
- */
- private long version = -1;
-
- /**
- * The index entries that make up this index. Map entries are looked up by name. These are maintained in order so
- * document order is maintained.
- */
- private LinkedHashMap<String, IndexEntry> indexEntries = new LinkedHashMap<String, IndexEntry>();
-
- /**
- * Lock for the index entries
- */
- private final ReentrantReadWriteLock readWriteLock;
-
- private ReentrantReadWriteLock readOnlyLock = new ReentrantReadWriteLock();
-
- /**
- * Read only index readers that also do reference counting.
- */
- private HashMap<String, IndexReader> referenceCountingReadOnlyIndexReaders = new HashMap<String, IndexReader>();
-
- /**
- * Main index reader
- */
- private IndexReader mainIndexReader;
- private Map<String, IndexReader> mainIndexReaders = new HashMap<String, IndexReader>();
-
- /**
- * Index writers for deltas
- */
- private Map<String, IndexWriter> indexWriters = new ConcurrentHashMap<String, IndexWriter>(51);
-
- /**
- * Index Readers for deltas
- */
- private Map<String, IndexReader> indexReaders = new ConcurrentHashMap<String, IndexReader>(51);
-
- /**
- * Map of state transitions
- */
- private EnumMap<TransactionStatus, Transition> transitions = new EnumMap<TransactionStatus, Transition>(TransactionStatus.class);
-
- /**
- * The queue of files and folders to delete
- */
- private ConcurrentLinkedQueue<String> deleteQueue = new ConcurrentLinkedQueue<String>();
-
- /**
- * A queue of reference counting index readers. We wait for these to become unused (ref count falls to zero) then
- * the data can be removed.
- */
- private ConcurrentLinkedQueue<IndexReader> deletableReaders = new ConcurrentLinkedQueue<IndexReader>();
-
- /**
- * The class that is responsible for deleting old index information from disk.
- */
- private Cleaner cleaner = new Cleaner();
-
- /**
- * The thread that deletes old index data
- */
- // private Thread cleanerThread;
- /**
- * The class that supports index merging and applying deletions from deltas to indexes and deltas that go before it.
- */
- private Merger merger = new Merger();
-
- /**
- * The thread that carries out index merging and applying deletions from deltas to indexes and deltas that go before
- * it.
- */
- // private Thread mergerThread;
- /**
- * A shared empty index to use if none exist.
- */
- private Directory emptyIndex = new RAMDirectory();
-
- /**
- * The index info files that make up the index
- */
- private static HashMap<File, IndexInfo> indexInfos = new HashMap<File, IndexInfo>();
-
- // Properties that control lucene indexing
- // --------------------------------------
-
- // Properties for indexes that are created by transactions ...
-
- private int maxDocsForInMemoryMerge = 10000;
-
- private int maxDocsForInMemoryIndex = 10000;
-
- private double maxRamInMbForInMemoryMerge = 16.0;
-
- private double maxRamInMbForInMemoryIndex = 16.0;
-
- private int writerMaxBufferedDocs = IndexWriter.DISABLE_AUTO_FLUSH;
-
- private double writerRamBufferSizeMb = 16.0;
-
- private int writerMergeFactor = 5;
-
- private int writerMaxMergeDocs = 1000000;
-
- private boolean writerUseCompoundFile = true;
-
- // Properties for indexes created by merging
-
- private int mergerMaxBufferedDocs = IndexWriter.DISABLE_AUTO_FLUSH;
-
- private double mergerRamBufferSizeMb = 16.0;
-
- private int mergerMergeFactor = 5;
-
- private int mergerMaxMergeDocs = 1000000;
-
- private boolean mergerUseCompoundFile = true;
-
- private int mergerTargetOverlays = 5;
-
- private int mergerTargetIndexes = 5;
-
- private int mergerTargetOverlaysBlockingFactor = 1;
-
- private Object mergerTargetLock = new Object();
-
- // To avoid deadlock (a thread with multiple deltas never proceeding to commit) we track whether each thread is
- // already in the prepare phase.
- private static ThreadLocal<Boolean> thisThreadPreparing = new ThreadLocal<Boolean>();
-
- // Common properties for indexers
-
- private long writeLockTimeout = IndexWriter.WRITE_LOCK_TIMEOUT;
-
- private int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
-
- private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
-
- /**
- * Control if the merger thread is active
- */
-
- private ThreadPoolExecutor threadPoolExecutor;
-
- private LuceneConfig config;
-
- private List<ApplicationListener> applicationListeners = new LinkedList<ApplicationListener>();
-
- static
- {
- // We do not require any of the lucene in-built locking.
- FSDirectory.setDisableLocks(true);
- }
-
- /**
- * Delete all index entries except the given delta.
- *
- * @param deltaId the delta to keep
- */
- public void delete(final String deltaId)
- {
-
- getWriteLock();
- try
- {
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- setStatusFromFile();
-
- // If the index is not shared we can do some easy clean
- // up
- if (!indexIsShared)
- {
- HashSet<String> deletable = new HashSet<String>();
- // clean up
- for (IndexEntry entry : indexEntries.values())
- {
- if(!entry.getName().equals(deltaId))
- {
- entry.setStatus(TransactionStatus.DELETABLE);
- deletable.add(entry.getName());
- }
- }
- // Delete entries that are not required
- invalidateMainReadersFromFirst(deletable);
- for (String id : deletable)
- {
- indexEntries.remove(id);
- }
-
- clearOldReaders();
-
- cleaner.schedule();
-
- merger.schedule();
-
- // persist the new state
- writeStatus();
-
- if (mainIndexReader != null)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("... invalidating main index reader");
- }
- ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
- mainIndexReader = null;
- }
- }
- return null;
- }
-
- public boolean canRetry()
- {
- return false;
- }
-
- });
- }
- finally
- {
- releaseWriteLock();
- }
- if(s_logger.isDebugEnabled())
- {
- s_logger.debug("Index "+ indexDirectory+" deleted");
- }
-
- }
-
- /**
- * Get the IndexInfo object based on the given directory. There is only one object per directory per JVM.
- *
- * @param file File
- * @param config LuceneConfig
- * @return IndexInfo
- * @throws IndexerException
- */
- public static synchronized IndexInfo getIndexInfo(File file, LuceneConfig config) throws IndexerException
- {
- File canonicalFile;
- try
- {
- canonicalFile = file.getCanonicalFile();
- IndexInfo indexInfo = indexInfos.get(canonicalFile);
- if (indexInfo == null)
- {
- indexInfo = new IndexInfo(canonicalFile, config);
- indexInfos.put(canonicalFile, indexInfo);
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Made " + indexInfo + " for " + file.getAbsolutePath());
- }
- }
-
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Got " + indexInfo + " for " + file.getAbsolutePath());
- }
- return indexInfo;
- }
- catch (IOException e)
- {
- throw new IndexerException("Failed to transform a file into its canonical form", e);
- }
-
- }
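
getIndexInfo(...) above is a one-instance-per-directory registry: canonicalising the File before the map lookup means "dir", "./dir" and symlinked paths all resolve to the same IndexInfo. The pattern in isolation (illustrative stand-in types, not the Alfresco API):

```java
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/** Canonical-path-keyed, one-instance-per-directory registry. */
class Registry<V>
{
    interface Factory<V>
    {
        V create(File dir);
    }

    private final Map<File, V> instances = new HashMap<File, V>();

    synchronized V get(File dir, Factory<V> factory) throws IOException
    {
        File key = dir.getCanonicalFile(); // normalise before lookup
        V value = instances.get(key);
        if (value == null)
        {
            value = factory.create(key);
            instances.put(key, value);
        }
        return value;
    }
}
```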
-
- /**
- * Construct an index in the given directory.
- *
- * @param indexDirectory File
- * @param config LuceneConfig
- */
- private IndexInfo(File indexDirectory, LuceneConfig config)
- {
- super();
- initialiseTransitions();
- this.config = config;
-
- if (config != null)
- {
- this.readWriteLock = new ReentrantReadWriteLock(config.getFairLocking());
- this.maxFieldLength = config.getIndexerMaxFieldLength();
- this.threadPoolExecutor = config.getThreadPoolExecutor();
- IndexInfo.useNIOMemoryMapping = config.getUseNioMemoryMapping();
- this.maxDocsForInMemoryMerge = config.getMaxDocsForInMemoryMerge();
- this.maxRamInMbForInMemoryMerge = config.getMaxRamInMbForInMemoryMerge();
- this.maxDocsForInMemoryIndex = config.getMaxDocsForInMemoryIndex();
- this.maxRamInMbForInMemoryIndex = config.getMaxRamInMbForInMemoryIndex();
- this.writerMaxBufferedDocs = config.getWriterMaxBufferedDocs();
- this.writerRamBufferSizeMb = config.getWriterRamBufferSizeMb();
- this.writerMergeFactor = config.getWriterMergeFactor();
- this.writerMaxMergeDocs = config.getWriterMaxMergeDocs();
- this.mergerMaxBufferedDocs = config.getMergerMaxBufferedDocs();
- this.mergerRamBufferSizeMb = config.getMergerRamBufferSizeMb();
- this.mergerMergeFactor = config.getMergerMergeFactor();
- this.mergerMaxMergeDocs = config.getMergerMaxMergeDocs();
- this.termIndexInterval = config.getTermIndexInterval();
- this.mergerTargetOverlays = config.getMergerTargetOverlayCount();
- this.mergerTargetIndexes = config.getMergerTargetIndexCount();
- this.mergerTargetOverlaysBlockingFactor = config.getMergerTargetOverlaysBlockingFactor();
- // Work out the relative path of the index
- try
- {
- String indexRoot = new File(config.getIndexRootLocation()).getCanonicalPath();
- this.relativePath = indexDirectory.getCanonicalPath().substring(indexRoot.length() + 1);
- }
- catch (IOException e)
- {
- throw new AlfrescoRuntimeException("Failed to determine index relative path", e);
- }
- }
- else
- {
- this.readWriteLock = new ReentrantReadWriteLock(false);
-
- // need a default thread pool ....
- TraceableThreadFactory threadFactory = new TraceableThreadFactory();
- threadFactory.setThreadDaemon(true);
- threadFactory.setThreadPriority(5);
-
- threadPoolExecutor = new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory, new ThreadPoolExecutor.CallerRunsPolicy());
-
- // Create a 'fake' relative path
- try
- {
- this.relativePath = indexDirectory.getCanonicalPath();
- int sepIndex = this.relativePath.indexOf(File.separator);
- if (sepIndex != -1)
- {
- if (this.relativePath.length() > sepIndex + 1)
- {
- this.relativePath = this.relativePath.substring(sepIndex + 1);
- }
- else
- {
- this.relativePath = "";
- }
- }
- }
- catch (IOException e)
- {
- throw new AlfrescoRuntimeException("Failed to determine index relative path", e);
- }
-
- }
-
- // Create an empty in memory index
- IndexWriter writer;
- try
- {
- writer = new IndexWriter(emptyIndex, new AlfrescoStandardAnalyser(), true, MaxFieldLength.LIMITED);
- writer.setUseCompoundFile(writerUseCompoundFile);
- writer.setMaxBufferedDocs(writerMaxBufferedDocs);
- writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
- writer.setMergeFactor(writerMergeFactor);
- writer.setMaxMergeDocs(writerMaxMergeDocs);
- writer.setWriteLockTimeout(writeLockTimeout);
- writer.setMaxFieldLength(maxFieldLength);
- writer.setTermIndexInterval(termIndexInterval);
- writer.setMergeScheduler(new SerialMergeScheduler());
- writer.setMergePolicy(new LogDocMergePolicy());
- writer.close();
- }
- catch (IOException e)
- {
- throw new IndexerException("Failed to create an empty in memory index!");
- }
-
- this.indexDirectory = indexDirectory;
-
- // Make sure the directory exists
- if (!this.indexDirectory.exists())
- {
- if (!this.indexDirectory.mkdirs())
- {
- throw new AlfrescoRuntimeException("Failed to create index directory");
- }
- }
- if (!this.indexDirectory.isDirectory())
- {
- throw new AlfrescoRuntimeException("The index must be held in a directory");
- }
-
- // Create the info files.
- File indexInfoFile = new File(this.indexDirectory, INDEX_INFO);
- File indexInfoBackupFile = new File(this.indexDirectory, INDEX_INFO_BACKUP);
- if (createFile(indexInfoFile) && createFile(indexInfoBackupFile))
- {
- // If both files required creation this is a new index
- version = 0;
- }
-
- // Open the files and channels for the index info file and the backup
- this.indexInfoRAF = openFile(indexInfoFile);
- this.indexInfoChannel = this.indexInfoRAF.getChannel();
-
- this.indexInfoBackupRAF = openFile(indexInfoBackupFile);
- this.indexInfoBackupChannel = this.indexInfoBackupRAF.getChannel();
-
- // If the index found no info files (i.e. it is new), check if there is
- // an old style index and convert it.
- if (version == 0)
- {
- // Check if an old style index exists
-
- final File oldIndex = new File(this.indexDirectory, OLD_INDEX);
- if (IndexReader.indexExists(oldIndex))
- {
- getWriteLock();
- try
- {
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- IndexWriter writer;
- try
- {
- writer = new IndexWriter(oldIndex, new AlfrescoStandardAnalyser(), false, MaxFieldLength.LIMITED);
- writer.setUseCompoundFile(writerUseCompoundFile);
- writer.setMaxBufferedDocs(writerMaxBufferedDocs);
- writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
- writer.setMergeFactor(writerMergeFactor);
- writer.setMaxMergeDocs(writerMaxMergeDocs);
- writer.setWriteLockTimeout(writeLockTimeout);
- writer.setMaxFieldLength(maxFieldLength);
- writer.setTermIndexInterval(termIndexInterval);
- writer.setMergeScheduler(new SerialMergeScheduler());
- writer.setMergePolicy(new LogDocMergePolicy());
- writer.optimize();
- long docs = writer.numDocs();
- writer.close();
-
- IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "", TransactionStatus.COMMITTED, "", docs, 0, false);
- indexEntries.put(OLD_INDEX, entry);
-
- writeStatus();
-
- // The index exists and we should initialise the single reader
- registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName(), entry.getDocumentCount()));
- }
- catch (IOException e)
- {
- throw new IndexerException("Failed to optimise old index");
- }
- return null;
- }
-
- public boolean canRetry()
- {
- return false;
- }
- });
- }
- finally
- {
- releaseWriteLock();
- }
-
- }
- }
-
- // The index exists
- else if (version == -1)
- {
- getWriteLock();
- try
- {
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- setStatusFromFile();
-
- // If the index is not shared we can do some easy clean
- // up
- if (!indexIsShared)
- {
- HashSet<String> deletable = new HashSet<String>();
- // clean up
- for (IndexEntry entry : indexEntries.values())
- {
- switch (entry.getStatus())
- {
- // states which can be deleted
- // We could check prepared states can be
- // committed.
- case ACTIVE:
- case MARKED_ROLLBACK:
- case NO_TRANSACTION:
- case PREPARING:
- case ROLLEDBACK:
- case ROLLINGBACK:
- case MERGE_TARGET:
- case UNKNOWN:
- case PREPARED:
- case DELETABLE:
- if (s_logger.isInfoEnabled())
- {
- s_logger.info("Deleting index entry " + entry);
- }
- entry.setStatus(TransactionStatus.DELETABLE);
- deletable.add(entry.getName());
- break;
- // States which are in mid-transition which we
- // can roll back to the committed state
- case COMMITTED_DELETING:
- case MERGE:
- if (s_logger.isInfoEnabled())
- {
- s_logger.info("Resetting merge to committed " + entry);
- }
- entry.setStatus(TransactionStatus.COMMITTED);
- registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName(), entry.getDocumentCount()));
- break;
- // Complete committing (which is post database
- // commit)
- case COMMITTING:
- // do the commit
- if (s_logger.isInfoEnabled())
- {
- s_logger.info("Committing " + entry);
- }
- entry.setStatus(TransactionStatus.COMMITTED);
- registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName(), entry.getDocumentCount()));
- break;
- // States that require no action
- case COMMITTED:
- registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName(), entry.getDocumentCount()));
- break;
- default:
- // nothing to do
- break;
- }
- }
- // Delete entries that are not required
- invalidateMainReadersFromFirst(deletable);
- for (String id : deletable)
- {
- indexEntries.remove(id);
- }
- clearOldReaders();
-
- cleaner.schedule();
-
- merger.schedule();
-
- // persist the new state
- writeStatus();
- }
- return null;
- }
-
- public boolean canRetry()
- {
- return false;
- }
-
- });
- }
- finally
- {
- releaseWriteLock();
- }
- }
- // Need to do with file lock - must share info about other readers to support this with shared indexer
- // implementation
-
- getWriteLock();
- try
- {
- LockWork<Object> work = new DeleteUnknownGuidDirectories();
- doWithFileLock(work);
- }
- finally
- {
- releaseWriteLock();
- }
-
- // Run the cleaner around every 20 seconds - this just makes the request to the thread pool
- timer.schedule(new TimerTask()
- {
- @Override
- public void run()
- {
- cleaner.schedule();
- }
- }, 0, 20000);
-
- publishDiscoveryEvent();
- }
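
The scheduling arrangement at the end of the constructor is worth noting: a single daemon Timer fires every 20 seconds but only requests work; the heavy lifting runs on the shared thread pool, so the timer thread never blocks. The pattern on its own (names are illustrative):

```java
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Daemon timer that delegates periodic work to a thread pool. */
class PeriodicRequester
{
    private final Timer timer = new Timer("Cleaner", true); // daemon: will not block JVM exit
    private final ExecutorService pool = Executors.newFixedThreadPool(2);

    void start(final Runnable job, long periodMillis)
    {
        timer.schedule(new TimerTask()
        {
            @Override
            public void run()
            {
                pool.execute(job); // hand off so the timer thread stays responsive
            }
        }, 0, periodMillis);
    }
}
```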
-
- private class DeleteUnknownGuidDirectories implements LockWork<Object>
- {
- public boolean canRetry()
- {
- return true;
- }
-
- public Object doWork() throws Exception
- {
- setStatusFromFile();
-
- // If the index is not shared we can do some easy clean
- // up
- if (!indexIsShared)
- {
- // Safe to tidy up all files that look like guids that we do not know about
- File[] files = indexDirectory.listFiles();
- if (files != null)
- {
- for (File file : files)
- {
- if (file.isDirectory())
- {
- String id = file.getName();
- if (!indexEntries.containsKey(id) && isGUID(id))
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Deleting unused index directory " + id);
- }
- deleteQueue.add(id);
- }
- }
- }
- }
-
- }
- return null;
- }
- }
-
- /**
- * This method should only be called from one thread as it is bound to a transaction.
- *
- * @param id String
- * @return IndexReader
- * @throws IOException
- */
- public IndexReader getDeltaIndexReader(String id) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
-
- // No read lock required as the delta should be bound to one thread only
- // Index readers are simply thread safe
- IndexReader reader = indexReaders.get(id);
- if (reader == null)
- {
- // close index writer if required
- closeDeltaIndexWriter(id);
- // Check the index knows about the transaction
- reader = buildAndRegisterDeltaReader(id);
- indexReaders.put(id, reader);
- }
- return reader;
- }
-
- private IndexReader buildAndRegisterDeltaReader(String id) throws IOException
- {
- IndexReader reader;
- // only register on write to avoid any locking for transactions that only ever read
- File location = getDeltaLocation(id);
- // File location = ensureDeltaIsRegistered(id);
- // Create a dummy index reader to deal with empty indexes and not
- // persist these.
- if (IndexReader.indexExists(location))
- {
- reader = IndexReader.open(location);
- }
- else
- {
- reader = IndexReader.open(emptyIndex);
- }
- return reader;
- }
-
- private File getDeltaLocation(String id) throws IOException
- {
- File file = new File(indexDirectory, id).getCanonicalFile();
- return file;
- }
-
- /**
- * The delta information does not need to be saved to disk.
- *
- * @param id String
- * @return File
- * @throws IOException
- */
- private File ensureDeltaIsRegistered(String id) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
-
- // A write lock is required if we have to update the local index
- // entries.
- // There should only be one thread trying to access this delta.
- File location = getDeltaLocation(id);
- getReadLock();
- try
- {
- if (!indexEntries.containsKey(id))
- {
- releaseReadLock();
- // release to upgrade to write lock
- getWriteLock();
- try
- {
- // Make sure the index exists
- if (!indexEntries.containsKey(id))
- {
- indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
- }
-
- }
- finally
- { // Downgrade lock
- getReadLock();
- releaseWriteLock();
- }
- }
- }
- finally
- {
- // Release the lock
- releaseReadLock();
- }
- return location;
- }
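
ensureDeltaIsRegistered(...) above shows the classic release/upgrade/re-check/downgrade dance: a ReentrantReadWriteLock cannot upgrade a read lock to a write lock directly, so the read lock is released first, the state re-checked under the write lock, and the write lock downgraded by re-acquiring the read lock before releasing it. The same discipline in a self-contained sketch:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Read-mostly map with the upgrade/re-check/downgrade locking pattern. */
class GuardedMap
{
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final Map<String, String> entries = new HashMap<String, String>();

    String ensure(String id, String value)
    {
        lock.readLock().lock();
        try
        {
            if (!entries.containsKey(id))
            {
                lock.readLock().unlock(); // cannot upgrade in place; release first
                lock.writeLock().lock();
                try
                {
                    // re-check: another thread may have registered it meanwhile
                    if (!entries.containsKey(id))
                    {
                        entries.put(id, value);
                    }
                }
                finally
                {
                    lock.readLock().lock();   // downgrade before giving up the write lock
                    lock.writeLock().unlock();
                }
            }
            return entries.get(id);
        }
        finally
        {
            lock.readLock().unlock();
        }
    }
}
```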
-
- /**
- * Make a lucene index writer
- *
- * @param location File
- * @param analyzer Analyzer
- * @return IndexWriter
- * @throws IOException
- */
- private IndexWriter makeDeltaIndexWriter(File location, Analyzer analyzer) throws IOException
- {
- IndexWriter writer;
- if (!IndexReader.indexExists(location))
- {
- writer = new IndexWriter(location, analyzer, true, MaxFieldLength.LIMITED);
- }
- else
- {
- writer = new IndexWriter(location, analyzer, false, MaxFieldLength.LIMITED);
- }
- writer.setUseCompoundFile(writerUseCompoundFile);
- writer.setMaxBufferedDocs(writerMaxBufferedDocs);
- writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
- writer.setMergeFactor(writerMergeFactor);
- writer.setMaxMergeDocs(writerMaxMergeDocs);
- writer.setWriteLockTimeout(writeLockTimeout);
- writer.setMaxFieldLength(maxFieldLength);
- writer.setTermIndexInterval(termIndexInterval);
- writer.setMergeScheduler(new SerialMergeScheduler());
- writer.setMergePolicy(new LogDocMergePolicy());
- return writer;
-
- }
-
- /**
- * Manage getting a lucene index writer for transactional data - looks after registration and checking there is no
- * active reader.
- *
- * @param id String
- * @param analyzer Analyzer
- * @return IndexWriter
- * @throws IOException
- */
- public IndexWriter getDeltaIndexWriter(String id, Analyzer analyzer) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
-
- // No read lock required as the delta should be bound to one thread only
- IndexWriter writer = indexWriters.get(id);
- if (writer == null)
- {
- // close index writer if required
- closeDeltaIndexReader(id);
- File location = ensureDeltaIsRegistered(id);
- writer = makeDeltaIndexWriter(location, analyzer);
- indexWriters.put(id, writer);
- }
- return writer;
- }
-
- /**
- * Manage closing and unregistering an index reader.
- *
- * @param id String
- * @throws IOException
- */
- public void closeDeltaIndexReader(String id) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
-
- // No lock required as the delta applied to one thread. The delta is
- // still active.
- IndexReader reader = indexReaders.remove(id);
- if (reader != null)
- {
- reader.close();
- }
- }
-
- /**
- * Manage closing and unregistering an index writer .
- *
- * @param id String
- * @throws IOException
- */
- public void closeDeltaIndexWriter(String id) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
-
- // No lock required as the delta applied to one thread. The delta is
- // still active.
- IndexWriter writer = indexWriters.remove(id);
- if (writer != null)
- {
- writer.close();
- }
- }
-
- /**
- * Make sure the writer and reader for TX data are closed.
- *
- * @param id String
- * @throws IOException
- */
- public void closeDelta(String id) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
- closeDeltaIndexReader(id);
- closeDeltaIndexWriter(id);
- }
-
- /**
- * Get the deletions for a given index (there is no check whether they should be applied; that is up to the calling layer).
- *
- * @param id String
- * @throws IOException
- */
- public Set<String> getDeletions(String id) throws IOException
- {
- return getDeletions(id, INDEX_INFO_DELETIONS);
- }
-
- /**
- * Get the deletions for a given index (there is no check whether they should be applied; that is up to the calling layer).
- *
- * @param id String
- * @param fileName String
- * @return Set<String>
- * @throws IOException
- */
- private Set<String> getDeletions(String id, String fileName) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
- // Check state
- Set<String> deletions = new HashSet<String>();
- File location = new File(indexDirectory, id).getCanonicalFile();
- File file = new File(location, fileName).getCanonicalFile();
- if (!file.exists())
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("No deletions for " + id);
- }
- return Collections.<String> emptySet();
- }
- DataInputStream is = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
- int size = is.readInt();
- for (int i = 0; i < size; i++)
- {
- String ref = is.readUTF();
- deletions.add(ref);
- }
- is.close();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("There are " + deletions.size() + " deletions for " + id);
- }
- return deletions;
-
- }
-
- /**
- * Set the aux data for the index entry for a transactional unit of work.
- *
- * @param id -
- * the tx id
- * @param toDelete -
- * noderefs that should be deleted from previous indexes (not this one)
- * @param documents -
- * the number of docs in the index
- * @param deleteNodesOnly -
- * should deletions only apply to nodes (i.e. not to containers)
- * @throws IOException
- */
- public void setPreparedState(String id, Set<String> toDelete, Set<String> containersToDelete, long documents, boolean deleteNodesOnly) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
- // Check state
- int toDeleteSize = toDelete.size();
- int containersToDeleteSize = containersToDelete.size();
- if (toDeleteSize > 0)
- {
- persistDeletions(id, toDelete, INDEX_INFO_DELETIONS);
- }
- if (containersToDeleteSize > 0)
- {
- persistDeletions(id, containersToDelete, INDEX_INFO_CONTAINER_DELETIONS);
- }
- getWriteLock();
- try
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- throw new IndexerException("Invalid index delta id " + id);
- }
- if ((entry.getStatus() != TransactionStatus.PREPARING) && (entry.getStatus() != TransactionStatus.COMMITTING))
- {
- throw new IndexerException("Deletes and doc count can only be set on a preparing index");
- }
- entry.setDocumentCount(documents);
- entry.setDeletions(toDeleteSize + containersToDeleteSize);
- entry.setDeletOnlyNodes(deleteNodesOnly);
- }
- finally
- {
- releaseWriteLock();
- }
- }
-
- /**
- * @param id String
- * @param toDelete Set<String>
- * @param fileName String
- * @throws IOException
- * @throws FileNotFoundException
- */
- private void persistDeletions(String id, Set<String> toDelete, String fileName) throws IOException, FileNotFoundException
- {
- File location = new File(indexDirectory, id).getCanonicalFile();
- if (!location.exists())
- {
- if (!location.mkdirs())
- {
- throw new IndexerException("Failed to make index directory " + location);
- }
- }
- // Write deletions
- DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location, fileName).getCanonicalFile())));
- os.writeInt(toDelete.size());
- for (String ref : toDelete)
- {
- os.writeUTF(ref);
- }
- os.flush();
- os.close();
- }
-
- private void invalidateMainReadersFromFirst(Set<String> ids) throws IOException
- {
- boolean found = false;
- for (String id : indexEntries.keySet())
- {
- if (!found && ids.contains(id))
- {
- found = true;
- }
- if (found)
- {
- IndexReader main = mainIndexReaders.remove(id);
- if (main != null)
- {
- ((ReferenceCounting) main).setInvalidForReuse();
- }
- }
- }
-
- if (found)
- {
- if(mainIndexReader != null)
- {
- ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
- mainIndexReader = null;
- }
- }
-
- }
-
- /**
- * Get the main reader for committed index data
- *
- * @return IndexReader
- * @throws IOException
- */
- public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader() throws IOException
- {
- getReadLock();
- try
- {
- // Check if we need to rebuild the main indexer as it is invalid.
- // (it is shared and quick version check fails)
- if (indexIsShared && !checkVersion())
- {
- releaseReadLock();
- getWriteLock();
- try
- {
- if (mainIndexReader != null)
- {
- ((ReferenceCounting)mainIndexReader).setInvalidForReuse();
- }
- mainIndexReader = null;
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
- }
-
- // Build if required
- if (mainIndexReader == null)
- {
- releaseReadLock();
- getWriteLock();
- try
- {
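- // Re-check under the write lock: another thread may have built the reader while this one waited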
- if (mainIndexReader == null)
- {
- // Sync with disk image if required
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- return null;
- }
-
- public boolean canRetry()
- {
- return true;
- }
-
- });
- mainIndexReader = createMainIndexReader();
-
- }
-
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
- }
- // Manage reference counting
- mainIndexReader.incRef();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Main index reader references = " + ((ReferenceCounting) mainIndexReader).getReferenceCount());
- }
-
- // ALF-10040: Wrap with a one-off CachingIndexReader (with cache disabled) so that LeafScorer behaves and passes through SingleFieldSelectors to the main index readers
- IndexReader reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER + GUID.generate(), mainIndexReader, false, config);
- ReferenceCounting refCounting = (ReferenceCounting) reader;
- reader.incRef();
- refCounting.setInvalidForReuse();
- return reader;
- }
- catch (RuntimeException e)
- {
- e.printStackTrace();
- throw e;
- }
- finally
- {
- releaseReadLock();
- }
- }
-
- /**
- * Get the main index reader augmented with the specified TX data. As above, but with the TX data added in.
- *
- * @param id String
- * @param deletions the node refs deleted by the TX
- * @param containerDeletions the container refs deleted by the TX
- * @param deleteOnlyNodes boolean
- * @return IndexReader
- * @throws IOException
- */
- public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions, Set<String> containerDeletions, boolean deleteOnlyNodes) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
- getReadLock();
- try
- {
- if (indexIsShared && !checkVersion())
- {
- releaseReadLock();
- getWriteLock();
- try
- {
- if (mainIndexReader != null)
- {
- ((ReferenceCounting)mainIndexReader).setInvalidForReuse();
- }
- mainIndexReader = null;
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
- }
-
- if (mainIndexReader == null)
- {
- releaseReadLock();
- getWriteLock();
- try
- {
- if (mainIndexReader == null)
- {
- // Sync with disk image if required
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- return null;
- }
-
- public boolean canRetry()
- {
- return true;
- }
-
- });
- mainIndexReader = createMainIndexReader();
-
- }
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
- }
- // Combine the index delta with the main index
- // Make sure the index is written to disk
- // TODO: Should use the in memory index but we often end up forcing
- // to disk anyway.
- // Is it worth it?
- // luceneIndexer.flushPending();
-
- IndexReader deltaReader = buildAndRegisterDeltaReader(id);
- IndexReader reader = null;
- if ((deletions == null || deletions.size() == 0) && (containerDeletions == null || containerDeletions.size() == 0))
- {
- reader = new MultiReader(new IndexReader[] { mainIndexReader, deltaReader }, false);
- }
- else
- {
- IndexReader filterReader = new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, containerDeletions, deleteOnlyNodes);
- reader = new MultiReader(new IndexReader[] { filterReader, deltaReader }, false);
- // Cancel out extra incRef made by MultiReader
- filterReader.decRef();
- }
-
- // The reference count would have been incremented automatically by MultiReader /
- // FilterIndexReaderByStringId
- deltaReader.decRef();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Main index reader references = " + ((ReferenceCounting) mainIndexReader).getReferenceCount());
- }
- reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER + id, reader, false, config);
- ReferenceCounting refCounting = (ReferenceCounting) reader;
- reader.incRef();
- refCounting.setInvalidForReuse();
- return reader;
- }
- finally
- {
- releaseReadLock();
- }
- }
-
- private boolean shouldBlock()
- {
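- // Throttle new prepares once the backlog of prepared/committing/committed deltas exceeds the blocking factor times the merger's target overlay count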
- int pendingDeltas = 0;
- int maxDeltas = mergerTargetOverlaysBlockingFactor * mergerTargetOverlays;
- for (IndexEntry entry : indexEntries.values())
- {
- if (entry.getType() == IndexType.DELTA)
- {
- TransactionStatus status = entry.getStatus();
- if (status == TransactionStatus.PREPARED || status == TransactionStatus.COMMITTING
- || status.isCommitted())
- {
- if (++pendingDeltas > maxDeltas)
- {
- return true;
- }
- }
- }
- }
- return false;
- }
-
- public void setStatus(final String id, final TransactionStatus state, final Set<Term> toDelete, final Set<Term> read) throws IOException
- {
- if (id == null)
- {
- throw new IndexerException("\"null\" is not a valid identifier for a transaction");
- }
- final Transition transition = getTransition(state);
-
- getReadLock();
- try
- {
- transition.beforeWithReadLock(id, toDelete, read);
- releaseReadLock();
- getWriteLock();
- try
- {
- // we may need to block for some deltas to be merged / rolled back
- IndexInfo alreadyPreparing = thisThreadPreparing.get();
- if (state == TransactionStatus.PREPARED)
- {
- // To avoid deadlock (a thread with multiple deltas never proceeding to commit) we don't block if
- // this thread is already in the prepare phase
- if (alreadyPreparing != null)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Can't throttle - " + Thread.currentThread().getName() + " already preparing");
- }
- }
- else
- {
- while (shouldBlock())
- {
- synchronized (mergerTargetLock)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("THROTTLING: " + Thread.currentThread().getName() + " " + indexEntries.size());
- }
- merger.schedule();
- releaseWriteLock();
- try
- {
- mergerTargetLock.wait(60000);
- }
- catch (InterruptedException e)
- {
- }
- }
- getWriteLock();
- }
- thisThreadPreparing.set(this);
- }
- }
- else
- {
- // Only clear the flag when the outermost thread exits prepare
- if (alreadyPreparing == this)
- {
- thisThreadPreparing.set(null);
- }
- }
-
- if (transition.requiresFileLock())
- {
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Start Index " + id + " state = " + state);
- }
- dumpInfo();
- transition.transition(id, toDelete, read);
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("End Index " + id + " state = " + state);
- }
- dumpInfo();
- return null;
- }
-
- public boolean canRetry()
- {
- return true;
- }
-
- });
- }
- else
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Start Index " + id + " state = " + state);
- }
- dumpInfo();
- transition.transition(id, toDelete, read);
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("End Index " + id + " state = " + state);
- }
- dumpInfo();
- }
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
- }
- finally
- {
- releaseReadLock();
- }
- }
-
- //
- // Internal support for status management
- //
-
- private Transition getTransition(TransactionStatus state)
- {
- Transition transition = transitions.get(state);
- if (transition != null)
- {
- return transition;
- }
- else
- {
- throw new IndexerException("Invalid state " + state);
- }
-
- }
-
- /**
- * Initialise the definitions for the available transitions.
- */
- private void initialiseTransitions()
- {
-
- transitions.put(TransactionStatus.PREPARING, new PreparingTransition());
- transitions.put(TransactionStatus.PREPARED, new PreparedTransition());
- transitions.put(TransactionStatus.COMMITTING, new CommittingTransition());
- transitions.put(TransactionStatus.COMMITTED, new CommittedTransition());
- transitions.put(TransactionStatus.ROLLINGBACK, new RollingBackTransition());
- transitions.put(TransactionStatus.ROLLEDBACK, new RolledBackTransition());
- transitions.put(TransactionStatus.DELETABLE, new DeletableTransition());
- transitions.put(TransactionStatus.ACTIVE, new ActiveTransition());
- }
-
- /**
- * API for transitions
- *
- * @author andyh
- */
- private interface Transition
- {
- void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException;
-
- void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException;
-
- boolean requiresFileLock();
- }
-
- /**
- * Transition to the preparing state
- *
- * @author andyh
- */
- private class PreparingTransition implements Transition
- {
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- // Nothing to do
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.PREPARING.follows(entry.getStatus()))
- {
- entry.setStatus(TransactionStatus.PREPARING);
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.PREPARING.isTransient();
- }
- }
-
- /**
- * Transition to the prepared state.
- *
- * @author andyh
- */
- private class PreparedTransition implements Transition
- {
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
-
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.PREPARED.follows(entry.getStatus()))
- {
-
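- // Rebuild the entry map so this entry sits immediately before the first entry that can be reordered; main readers from that point on must be invalidated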
- LinkedHashMap<String, IndexEntry> reordered = new LinkedHashMap<String, IndexEntry>();
- boolean addedPreparedEntry = false;
- for (String key : indexEntries.keySet())
- {
- IndexEntry current = indexEntries.get(key);
-
- if (!current.getStatus().canBeReordered())
- {
- reordered.put(current.getName(), current);
- }
- else if (!addedPreparedEntry)
- {
- reordered.put(entry.getName(), entry);
- reordered.put(current.getName(), current);
- addedPreparedEntry = true;
- invalidateMainReadersFromFirst(Collections.singleton(current.getName()));
- }
- else if (current.getName().equals(entry.getName()))
- {
- // skip as we are moving it
- }
- else
- {
- reordered.put(current.getName(), current);
- }
- }
-
- if (indexEntries.size() != reordered.size())
- {
- indexEntries = reordered;
- dumpInfo();
- throw new IndexerException("Concurrent modification error");
- }
- indexEntries = reordered;
-
- entry.setStatus(TransactionStatus.PREPARED);
- writeStatus();
-
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.PREPARED.isTransient();
- }
- }
-
- private class CommittingTransition implements Transition
- {
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
-
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.COMMITTING.follows(entry.getStatus()))
- {
- entry.setStatus(TransactionStatus.COMMITTING);
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.COMMITTING.isTransient();
- }
- }
-
- private class CommittedTransition implements Transition
- {
-
- ThreadLocal<IndexReader> tl = new ThreadLocal<IndexReader>();
-
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- // Make sure we have set up the reader for the data
- // ... and close it so we do not up the ref count
- closeDelta(id);
- IndexEntry entry = indexEntries.get(id);
- tl.set(buildReferenceCountingIndexReader(id, entry.getDocumentCount()));
- }
-
- /**
- * This has to be protected to allow for retry
- */
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- clearOldReaders();
- cleaner.schedule();
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.COMMITTED.follows(entry.getStatus()))
- {
- // Do the deletions
- invalidateMainReadersFromFirst(Collections.singleton(id));
- if ((entry.getDocumentCount() + entry.getDeletions()) == 0)
- {
- registerReferenceCountingIndexReader(id, tl.get());
- indexEntries.remove(id);
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Removed commit with no new docs and no deletions");
- }
- clearOldReaders();
- cleaner.schedule();
- }
- else
- {
- registerReferenceCountingIndexReader(id, tl.get());
- entry.setStatus(TransactionStatus.COMMITTED);
- // TODO: optimise to index for no deletions
- // have to allow for this in the application of deletions,
- writeStatus();
- if (mainIndexReader != null)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("... invalidating main index reader");
- }
- ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
- mainIndexReader = null;
- }
-
- merger.schedule();
- }
-
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
- }
- notifyListeners("CommittedTransactions", 1);
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.COMMITTED.isTransient();
- }
- }
-
- private class RollingBackTransition implements Transition
- {
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
-
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.ROLLINGBACK.follows(entry.getStatus()))
- {
- entry.setStatus(TransactionStatus.ROLLINGBACK);
- writeStatus();
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.ROLLINGBACK.isTransient();
- }
- }
-
- private class RolledBackTransition implements Transition
- {
- ThreadLocal<IndexReader> tl = new ThreadLocal<IndexReader>();
-
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- closeDelta(id);
- IndexEntry entry = indexEntries.get(id);
- tl.set(buildReferenceCountingIndexReader(id, entry.getDocumentCount()));
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- clearOldReaders();
- cleaner.schedule();
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.ROLLEDBACK.follows(entry.getStatus()))
- {
- entry.setStatus(TransactionStatus.ROLLEDBACK);
- writeStatus();
-
- registerReferenceCountingIndexReader(id, tl.get());
- indexEntries.remove(id);
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Removed rollback");
- }
- clearOldReaders();
- cleaner.schedule();
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.ROLLEDBACK.isTransient();
- }
- }
-
- private class DeletableTransition implements Transition
- {
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
-
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry == null)
- {
- clearOldReaders();
- cleaner.schedule();
- throw new IndexerException("Unknown transaction " + id);
- }
-
- if (TransactionStatus.DELETABLE.follows(entry.getStatus()))
- {
- invalidateMainReadersFromFirst(Collections.singleton(id));
- indexEntries.remove(id);
- writeStatus();
- clearOldReaders();
- cleaner.schedule();
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.DELETABLE.isTransient();
- }
- }
-
- private class ActiveTransition implements Transition
- {
- public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
-
- }
-
- public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry != null)
- {
- if (entry.getStatus() != TransactionStatus.ACTIVE)
- {
- throw new IndexerException("TX Already active " + id);
- }
- }
-
- if (TransactionStatus.ACTIVE.follows(null))
- {
- indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
- }
- else
- {
- throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
- }
- }
-
- public boolean requiresFileLock()
- {
- return !TransactionStatus.ACTIVE.isTransient();
- }
- }
-
- //
- //
- // Internal methods for implementation support
- // ===========================================
- //
- // These methods should all be called with the appropriate locks.
- //
- //
-
- private static boolean createFile(File file)
- {
-
- if (!file.exists())
- {
- try
- {
- file.createNewFile();
- return true;
- }
- catch (IOException e)
- {
- throw new AlfrescoRuntimeException("Failed to create info file", e);
- }
- }
- return false;
- }
-
- private static RandomAccessFile openFile(File file)
- {
- try
- {
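- // With NIO memory mapping, changes are forced to disk explicitly, so plain "rw" is enough; otherwise use synchronous "rws"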
- if (useNIOMemoryMapping)
- {
- return new RandomAccessFile(file, "rw");
- }
- else
- {
- return new RandomAccessFile(file, "rws");
- }
- }
- catch (FileNotFoundException e)
- {
- throw new AlfrescoRuntimeException("Failed to open index info file", e);
- }
- }
-
- /**
- * Check status must be called holding the file lock.
- *
- * @throws IOException
- */
- private void setStatusFromFile() throws IOException
- {
- try
- {
- setStatusFromFile(indexInfoChannel);
- }
- catch (IOException e)
- {
- // The first data file is corrupt so we fall back to the back up
- setStatusFromFile(indexInfoBackupChannel);
- }
- clearOldReaders();
- }
-
- private void clearOldReaders() throws IOException
- {
- // Find current invalid
- HashSet<String> inValid = new HashSet<String>();
- for (String id : referenceCountingReadOnlyIndexReaders.keySet())
- {
- if (!indexEntries.containsKey(id))
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(id + " is now INVALID ");
- }
- inValid.add(id);
- }
- else
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(id + " is still part of the index ");
- }
- }
- }
- // Clear invalid
- clearInvalid(inValid);
- }
-
- private void clearInvalid(Set<String> inValid) throws IOException
- {
- boolean hasInvalid = false;
- for (String id : inValid)
- {
- IndexReader reader = referenceCountingReadOnlyIndexReaders.remove(id);
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("... invalidating sub reader " + id);
- }
- if (reader != null)
- {
- ReferenceCounting referenceCounting = (ReferenceCounting) reader;
- referenceCounting.setInvalidForReuse();
- deletableReaders.add(reader);
- hasInvalid = true;
- }
- }
- if (hasInvalid)
- {
- for (String id : inValid)
- {
- IndexReader main = mainIndexReaders.remove(id);
- if (main != null)
- {
- ((ReferenceCounting) main).setInvalidForReuse();
- }
- }
- if (mainIndexReader != null)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("... invalidating main index reader");
- }
- ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
- }
- mainIndexReader = null;
- }
- }
-
- private IndexReader createMainIndexReader() throws IOException
- {
- IndexReader reader = null;
- IndexReader oldReader = null;
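- // Chain the committed entries in order: full indexes are appended via MultiReader, while deltas first filter their recorded deletions out of everything to their left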
- for (String id : indexEntries.keySet())
- {
- IndexEntry entry = indexEntries.get(id);
- if (entry.getStatus().isCommitted())
- {
- IndexReader subReader = getReferenceCountingIndexReader(id);
- if (reader == null)
- {
- reader = subReader;
- }
- else
- {
- boolean oldReaderIsSubReader = oldReader == null;
- oldReader = reader;
- reader = mainIndexReaders.get(id);
- if (reader == null)
- {
- if (entry.getType() == IndexType.INDEX)
- {
- reader = new MultiReader(new IndexReader[] { oldReader, subReader }, false);
- }
- else if (entry.getType() == IndexType.DELTA)
- {
- try
- {
- IndexReader filterReader = new FilterIndexReaderByStringId(id, oldReader, getDeletions(entry.getName(), INDEX_INFO_DELETIONS), getDeletions(entry.getName(), INDEX_INFO_CONTAINER_DELETIONS), entry.isDeletOnlyNodes());
- reader = new MultiReader(new IndexReader[] { filterReader, subReader }, false);
- // Cancel out the incRef on the filter reader
- filterReader.decRef();
- }
- catch (IOException ioe)
- {
- s_logger.error("Failed building filter reader beneath " + entry.getName(), ioe);
- throw ioe;
- }
- }
- reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(id+"multi", reader, true, config);
- mainIndexReaders.put(id, reader);
- }
- }
- }
- }
- if (reader == null)
- {
- reader = IndexReader.open(emptyIndex);
- }
- else
- {
- // Keep this reader open whilst it is referenced by mainIndexReaders / referenceCountingReadOnlyIndexReaders
- reader.incRef();
- }
-
- reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER, reader, false, config);
- return reader;
- }
-
- private IndexReader getReferenceCountingIndexReader(String id) throws IOException
- {
- IndexReader reader = referenceCountingReadOnlyIndexReaders.get(id);
- if (reader == null)
- {
- throw new IllegalStateException("Indexer should have been pre-built for " + id);
- }
- return reader;
- }
-
- private void registerReferenceCountingIndexReader(String id, IndexReader reader) throws IOException
- {
- ReferenceCounting referenceCounting = (ReferenceCounting) reader;
- if (!referenceCounting.getId().equals(id))
- {
- throw new IllegalStateException("Registering " + referenceCounting.getId() + " as " + id);
- }
- // ALF-13981: Be careful not to invalidate the segment reader if we are trying to re-register exactly the same
- // one (e.g. in a doWithFileLock() retry loop)
- if (referenceCountingReadOnlyIndexReaders.get(id) != reader)
- {
- clearInvalid(Collections.singleton(id));
- referenceCountingReadOnlyIndexReaders.put(id, reader);
- }
- }
-
- private double getSizeInMb(File file)
- {
- long size = getSize(file);
- return size/1024.0d/1024.0d;
- }
-
- private long getSize(File file)
- {
- long size = 0l;
- if (file == null)
- {
- return size;
- }
- if (file.isFile())
- {
- return file.length();
- }
- else
- {
- File[] files = file.listFiles();
- if(files == null)
- {
- return size;
- }
- for (File current : files)
- {
- if (current.isDirectory())
- {
- size += getSize(current);
- }
- else
- {
- size += current.length();
- }
- }
- }
- return size;
- }
-
- private IndexReader buildReferenceCountingIndexReader(String id, long size) throws IOException
- {
- IndexReader reader;
- File location = new File(indexDirectory, id).getCanonicalFile();
- double folderSize = getSizeInMb(location);
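- // Open small indexes fully in RAM for speed; anything over the configured size thresholds is opened from disk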
- if (IndexReader.indexExists(location))
- {
- if ((size < maxDocsForInMemoryIndex) && (folderSize < maxRamInMbForInMemoryIndex))
- {
- RAMDirectory rd = new RAMDirectory(location);
- reader = IndexReader.open(rd);
- }
- else
- {
- reader = IndexReader.open(location);
- }
- }
- else
- {
- reader = IndexReader.open(emptyIndex);
- }
- reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(id, reader, true, config);
- return reader;
- }
-
- private boolean checkVersion() throws IOException
- {
- try
- {
- return checkVersion(indexInfoChannel);
- }
- catch (IOException e)
- {
- // The first data file is corrupt so we fall back to the back up
- try
- {
- return checkVersion(indexInfoBackupChannel);
- }
- catch (IOException ee)
- {
- return false;
- }
- }
- }
-
- private boolean checkVersion(FileChannel channel) throws IOException
- {
- if (channel.size() > 0)
- {
- channel.position(0);
- ByteBuffer buffer;
-
- if (useNIOMemoryMapping)
- {
- MappedByteBuffer mbb = channel.map(MapMode.READ_ONLY, 0, 8);
- mbb.load();
- buffer = mbb;
- }
- else
- {
- buffer = ByteBuffer.wrap(new byte[8]);
- channel.read(buffer);
- ((Buffer) buffer).position(0);
- }
-
- ((Buffer) buffer).position(0);
- long onDiskVersion = buffer.getLong();
- return (version == onDiskVersion);
- }
- return (version == 0);
- }
-
- private void setStatusFromFile(FileChannel channel) throws IOException
- {
- if (channel.size() > 0)
- {
- channel.position(0);
- ByteBuffer buffer;
-
- if (useNIOMemoryMapping)
- {
- MappedByteBuffer mbb = channel.map(MapMode.READ_ONLY, 0, channel.size());
- mbb.load();
- buffer = mbb;
- }
- else
- {
- buffer = ByteBuffer.wrap(new byte[(int) channel.size()]);
- channel.read(buffer);
- ((Buffer) buffer).position(0);
- }
-
- ((Buffer) buffer).position(0);
- long onDiskVersion = buffer.getLong();
- if (version != onDiskVersion)
- {
- CRC32 crc32 = new CRC32();
- crc32.update((int) (onDiskVersion >>> 32) & 0xFFFFFFFF);
- crc32.update((int) (onDiskVersion >>> 0) & 0xFFFFFFFF);
- int size = buffer.getInt();
- crc32.update(size);
- LinkedHashMap<String, IndexEntry> newIndexEntries = new LinkedHashMap<String, IndexEntry>();
- // Not all state is saved; some is specific to this index, so we
- // need to add the transient stuff back in.
- // Until things are committed they are not shared, unless they are
- // prepared.
- for (int i = 0; i < size; i++)
- {
- String indexTypeString = readString(buffer, crc32);
- IndexType indexType;
- try
- {
- indexType = IndexType.valueOf(indexTypeString);
- }
- catch (IllegalArgumentException e)
- {
- throw new IOException("Invalid type " + indexTypeString);
- }
-
- String name = readString(buffer, crc32);
-
- String parentName = readString(buffer, crc32);
-
- String txStatus = readString(buffer, crc32);
- TransactionStatus status;
- try
- {
- status = TransactionStatus.valueOf(txStatus);
- }
- catch (IllegalArgumentException e)
- {
- throw new IOException("Invalid status " + txStatus);
- }
-
- String mergeId = readString(buffer, crc32);
-
- long documentCount = buffer.getLong();
- crc32.update((int) (documentCount >>> 32) & 0xFFFFFFFF);
- crc32.update((int) (documentCount >>> 0) & 0xFFFFFFFF);
-
- long deletions = buffer.getLong();
- crc32.update((int) (deletions >>> 32) & 0xFFFFFFFF);
- crc32.update((int) (deletions >>> 0) & 0xFFFFFFFF);
-
- byte deleteOnlyNodesFlag = buffer.get();
- crc32.update(deleteOnlyNodesFlag);
- boolean isDeletOnlyNodes = deleteOnlyNodesFlag == 1;
-
- if (!status.isTransient())
- {
- newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId, documentCount, deletions, isDeletOnlyNodes));
- }
- }
- long onDiskCRC32 = buffer.getLong();
- if (crc32.getValue() == onDiskCRC32)
- {
- for (IndexEntry entry : indexEntries.values())
- {
- if (entry.getStatus().isTransient())
- {
- newIndexEntries.put(entry.getName(), entry);
- }
- }
- version = onDiskVersion;
- indexEntries = newIndexEntries;
- }
- else
- {
- throw new IOException("Invalid file check sum");
- }
- }
- }
-
- }
-
- private String readString(ByteBuffer buffer, CRC32 crc32) throws UnsupportedEncodingException
- {
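- // Strings are stored one byte per character (writeString rejects anything above 0xFF), so each byte maps directly to a char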
- int size = buffer.getInt();
- byte[] bytes = new byte[size];
- buffer.get(bytes);
- char[] chars = new char[size];
- for (int i = 0; i < size; i++)
- {
- chars[i] = (char) bytes[i];
- }
- crc32.update(bytes);
- return new String(chars);
- }
-
- private void writeString(ByteBuffer buffer, CRC32 crc32, String string) throws UnsupportedEncodingException
- {
- char[] chars = string.toCharArray();
- byte[] bytes = new byte[chars.length];
- for (int i = 0; i < chars.length; i++)
- {
- if (chars[i] > 0xFF)
- {
- throw new UnsupportedEncodingException();
- }
- bytes[i] = (byte) chars[i];
- }
- buffer.putInt(bytes.length);
- buffer.put(bytes);
- crc32.update(bytes);
- }
-
- private void writeStatus() throws IOException
- {
- version++;
- writeStatusToFile(indexInfoChannel);
- writeStatusToFile(indexInfoBackupChannel);
- // We have a state that allows more transactions. Notify waiting threads
- if (!shouldBlock())
- {
- synchronized (mergerTargetLock)
- {
- mergerTargetLock.notifyAll();
- }
- }
- }
-
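- /*
- * Record layout written below: version (long), entry count (int), then for each entry its type, name,
- * parent name, status and merge id as length-prefixed single-byte strings, the document count (long),
- * the deletion count (long) and the delete-only-nodes flag (byte), followed by a CRC32 checksum (long).
- */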
- private void writeStatusToFile(FileChannel channel) throws IOException
- {
- long size = getBufferSize();
-
- ByteBuffer buffer;
- if (useNIOMemoryMapping)
- {
- MappedByteBuffer mbb = channel.map(MapMode.READ_WRITE, 0, size);
- mbb.load();
- buffer = mbb;
- }
- else
- {
- channel.truncate(size);
- buffer = ByteBuffer.wrap(new byte[(int) size]);
- }
-
- ((Buffer) buffer).position(0);
-
- buffer.putLong(version);
- CRC32 crc32 = new CRC32();
- crc32.update((int) (version >>> 32) & 0xFFFFFFFF);
- crc32.update((int) (version >>> 0) & 0xFFFFFFFF);
-
- buffer.putInt(indexEntries.size());
- crc32.update(indexEntries.size());
-
- for (IndexEntry entry : indexEntries.values())
- {
- String entryType = entry.getType().toString();
- writeString(buffer, crc32, entryType);
-
- writeString(buffer, crc32, entry.getName());
-
- writeString(buffer, crc32, entry.getParentName());
-
- String entryStatus = entry.getStatus().toString();
- writeString(buffer, crc32, entryStatus);
-
- writeString(buffer, crc32, entry.getMergeId());
-
- buffer.putLong(entry.getDocumentCount());
- crc32.update((int) (entry.getDocumentCount() >>> 32) & 0xFFFFFFFF);
- crc32.update((int) (entry.getDocumentCount() >>> 0) & 0xFFFFFFFF);
-
- buffer.putLong(entry.getDeletions());
- crc32.update((int) (entry.getDeletions() >>> 32) & 0xFFFFFFFF);
- crc32.update((int) (entry.getDeletions() >>> 0) & 0xFFFFFFFF);
-
- buffer.put(entry.isDeletOnlyNodes() ? (byte) 1 : (byte) 0);
- crc32.update(entry.isDeletOnlyNodes() ? new byte[] { (byte) 1 } : new byte[] { (byte) 0 });
- }
- buffer.putLong(crc32.getValue());
-
- if (useNIOMemoryMapping)
- {
- ((MappedByteBuffer) buffer).force();
- }
- else
- {
- ((Buffer) buffer).rewind();
- channel.position(0);
- channel.write(buffer);
- }
- }
-
- private long getBufferSize() throws IOException
- {
- long size = 0;
- size += 8; // version
- size += 4; // entry count
- for (IndexEntry entry : indexEntries.values())
- {
- String entryType = entry.getType().toString();
- size += (entryType.length()) + 4;
- size += (entry.getName().length()) + 4;
- size += (entry.getParentName().length()) + 4;
- String entryStatus = entry.getStatus().toString();
- size += (entryStatus.length()) + 4;
- size += (entry.getMergeId().length()) + 4;
- size += 8; // document count
- size += 8; // deletions
- size += 1; // delete-only-nodes flag
- }
- size += 8; // CRC32 checksum
- return size;
- }
-
- public interface LockWork<Result>
- {
- public Result doWork() throws Exception;
-
- public boolean canRetry();
- }
-
- public <R> R doReadOnly(LockWork<R> lockWork)
- {
-
- readOnlyLock.writeLock().lock();
- try
- {
- getReadLock();
- try
- {
- return doWithFileLock(lockWork);
- }
- finally
- {
- releaseReadLock();
- }
- }
- finally
- {
- readOnlyLock.writeLock().unlock();
- }
- }
-
- private static final int CHANNEL_OPEN_RETRIES = 5;
-
- private <R> R doWithFileLock(LockWork<R> lockWork)
- {
- try
- {
- return doWithFileLock(lockWork, CHANNEL_OPEN_RETRIES);
- }
- catch (Throwable e)
- {
- // Re-throw the exception
- if (e instanceof RuntimeException)
- {
- throw (RuntimeException) e;
- }
- else
- {
- throw new RuntimeException("Error during run with lock.", e);
- }
- }
- }
-
- /**
- * Specific exception to catch channel close issues.
- *
- * @author Derek Hulley
- * @since 2.1.3
- */
- private static class IndexInfoChannelException extends IOException
- {
- /**
- *
- */
- private static final long serialVersionUID = 1588898991653057286L;
-
- public IndexInfoChannelException(String msg)
- {
- super(msg);
- }
- }
-
- /**
- * An iterative method that retries the operation in the event of the channel being closed.
- *
- * @param lockWork
- * the work to perform while holding the lock
- * @param retriesRemaining
- * the number of retries remaining
- * @return Returns the lock work result
- */
- private <R> R doWithFileLock(LockWork<R> lockWork, int retriesRemaining) throws Throwable
- {
- FileLock fileLock = null;
- R result = null;
- long start = 0L;
- try
- {
- // Check that the channel is open
- if (!indexInfoChannel.isOpen())
- {
- if (lockWork.canRetry())
- {
- throw new IndexInfoChannelException("Channel is closed. Manually triggering reopen attempts");
- }
- else
- {
- reopenChannels();
- }
- }
-
- if (indexIsShared)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(" ... waiting for file lock");
- start = System.nanoTime();
- }
- fileLock = indexInfoChannel.lock();
- if (s_logger.isDebugEnabled())
- {
- long end = System.nanoTime();
- s_logger.debug(" ... got file lock in " + ((end - start) / 10e6f) + " ms");
- }
- if (!checkVersion())
- {
- setStatusFromFile();
- }
- }
- result = lockWork.doWork();
- return result;
- }
- catch (IOException e)
- {
- if (!lockWork.canRetry())
- {
- // We've done our best
- s_logger.warn("This operation can not retry upon an IOException - it has to roll back to its previous state");
- throw e;
- }
- if (retriesRemaining == 0)
- {
- // We've done our best
- s_logger.warn("No more channel open retries remaining");
- throw e;
- }
- else
- {
- // Attempt to reopen the channel
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("\n" + "Channel is closed. Will attempt to open it. \n" + " Retries remaining: " + retriesRemaining);
- }
- try
- {
- reopenChannels();
- // Loop around and try again
- return doWithFileLock(lockWork, --retriesRemaining);
- }
- catch (Throwable ee)
- {
- // Report this error, but throw the original
- s_logger.error("Channel reopen failed on index info files in: " + this.indexDirectory, ee);
- throw e;
- }
- }
- }
- finally
- {
- if (fileLock != null)
- {
- try
- {
- fileLock.release();
- long end = System.nanoTime();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(" ... released file lock after " + ((end - start) / 10e6f) + " ms");
- }
- }
- catch (IOException e)
- {
- s_logger.warn("Failed to release file lock: " + e.getMessage(), e);
- }
- }
- }
- }
-
- /**
- * Reopens all the channels. The channels are closed first. This method is synchronized.
- */
- private synchronized void reopenChannels() throws Throwable
- {
- try
- {
- indexInfoRAF.close();
- }
- catch (IOException e)
- {
- s_logger.warn("Failed to close indexInfoRAF", e);
- }
- try
- {
- indexInfoBackupRAF.close();
- }
- catch (IOException e)
- {
- s_logger.warn("Failed to close indexInfoRAF", e);
- }
- File indexInfoFile = new File(this.indexDirectory, INDEX_INFO);
- File indexInfoBackupFile = new File(this.indexDirectory, INDEX_INFO_BACKUP);
-
- // Open the files and channels for the index info file and the backup
- this.indexInfoRAF = openFile(indexInfoFile);
- this.indexInfoChannel = this.indexInfoRAF.getChannel();
-
- this.indexInfoBackupRAF = openFile(indexInfoBackupFile);
- this.indexInfoBackupChannel = this.indexInfoBackupRAF.getChannel();
- }
-
- /**
- * Helper to print out index information
- *
- * @param args String[]
- * @throws Throwable
- */
- public static void main(String[] args) throws Throwable
- {
- for (int i = 0; i < args.length; i++)
- {
- File indexLocation = new File(args[i]);
- if (!indexLocation.exists())
- {
- System.err.println("Index directory doesn't exist: " + indexLocation);
- continue;
- }
- readIndexInfo(indexLocation);
- }
- }
-
- static Query getPathQuery(String path) throws SAXPathException
- {
- ApplicationContext ac = ApplicationContextHelper.getApplicationContext();
- XPathReader reader = new XPathReader();
- LuceneXPathHandler handler = new LuceneXPathHandler();
- handler.setNamespacePrefixResolver((NamespaceService) ac.getBean("namespaceService"));
- handler.setDictionaryService((DictionaryService) ac.getBean("dictionaryService"));
- reader.setXPathHandler(handler);
- reader.parse(path);
- PathQuery pathQuery = handler.getQuery();
- pathQuery.setRepeats(false);
- return pathQuery;
- }
-
- private static void readIndexInfo(File indexLocation) throws Throwable
- {
- long start;
- long end;
- IndexInfo ii = new IndexInfo(indexLocation, null);
-
- ii.readWriteLock.writeLock().lock();
- try
- {
- System.out.println("Entry List for " + indexLocation);
- System.out.println(" Size = " + ii.indexEntries.size());
- int i = 0;
- for (IndexEntry entry : ii.indexEntries.values())
- {
- System.out.println("\t" + (i++) + "\t" + entry.toString());
- }
- }
- finally
- {
- ii.releaseWriteLock();
- }
- IndexReader reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader();
- System.out.println(reader.getFieldNames(FieldOption.ALL));
-
- TermEnum te = reader.terms();
- while (te.next())
- {
- if (te.term().field().contains("FTS"))
- {
- System.out.println(te.term());
- }
- }
- // @{http://www.alfresco.org/model/content/1.0}name:product363_ocmwbeersel
-
- IndexSearcher searcher = new IndexSearcher(reader);
- Query query = new TermQuery(new Term("@{http://www.alfresco.org/model/content/1.0}name", "product363_ocmwbeersel"));
- start = System.nanoTime();
- Hits hits = searcher.search(query);
- end = System.nanoTime();
- System.out.println("@{http://www.alfresco.org/model/content/1.0}name:product363_ocmwbeersel = " + hits.length() + " in " + ((end - start) / 1e9));
- searcher.close();
-
- searcher = new IndexSearcher(reader);
- query = new WildcardQuery(new Term("@{http://www.alfresco.org/model/content/1.0}name", "b*"));
- start = System.nanoTime();
- hits = searcher.search(query);
- end = System.nanoTime();
- System.out.println("@{http://www.alfresco.org/model/content/1.0}name:b* = " + hits.length() + " in " + ((end - start) / 1e9));
- searcher.close();
-
- searcher = new IndexSearcher(reader);
- query = new TermQuery(new Term("@{http://www.alfresco.org/model/content/1.0}name", "be"));
- start = System.nanoTime();
- hits = searcher.search(query);
- end = System.nanoTime();
- System.out.println("@{http://www.alfresco.org/model/content/1.0}name:be = " + hits.length() + " in " + ((end - start) / 1e9));
- searcher.close();
- }
-
- /**
- * Clean up support.
- *
- * @author Andy Hind
- */
- private class Cleaner extends AbstractSchedulable
- {
-
- String getLogName()
- {
- return "Index cleaner";
- }
-
- void runImpl()
- {
-
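- // First pass: queue readers whose reference count has dropped to zero for physical deletion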
- Iterator<IndexReader> i = deletableReaders.iterator();
- while (i.hasNext())
- {
- IndexReader reader = i.next();
- ReferenceCounting refCounting = (ReferenceCounting) reader;
- if (refCounting.getReferenceCount() == 0)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Deleting no longer referenced " + refCounting.getId());
- s_logger.debug("... queued delete for " + refCounting.getId());
- s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
- }
- getReadLock();
- try
- {
- if (indexEntries.containsKey(refCounting.getId()))
- {
- s_logger.error("ERROR - deleting live reader - " + refCounting.getId());
- }
- }
- finally
- {
- releaseReadLock();
- }
- deleteQueue.add(refCounting.getId());
- i.remove();
- }
- else if (s_logger.isTraceEnabled() && refCounting.getCreationTime() < System.currentTimeMillis() - 120000)
- {
- for (Throwable t : refCounting.getReferences())
- {
- s_logger.trace(t.getMessage(), t);
- }
- }
-
- }
-
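- // Second pass: delete the queued index directories; anything that cannot be removed stays queued for the next run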
- Iterator<String> j = deleteQueue.iterator();
- while (j.hasNext())
- {
- String id = j.next();
- try
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Expunging " + id + " remaining " + deleteQueue.size());
- s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(id));
- }
- // try and delete
- File location = new File(indexDirectory, id).getCanonicalFile();
- if (!deleteDirectory(location))
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("DELETE FAILED");
- }
- }
- else
- {
- j.remove();
- }
- }
- catch (IOException ioe)
- {
- s_logger.warn("Failed to delete file - invalid canonical file", ioe);
- }
- }
- }
-
- ExitState recoverImpl()
- {
- return ExitState.DONE;
- }
-
- private boolean deleteDirectory(File file)
- {
- File[] children = file.listFiles();
- if (children != null)
- {
- for (int i = 0; i < children.length; i++)
- {
- File child = children[i];
- if (child.isDirectory())
- {
- deleteDirectory(child);
- }
- else
- {
- if (child.exists() && !child.delete() && child.exists())
- {
- return false;
- }
- }
- }
- }
- if (file.exists() && !file.delete() && file.exists())
- {
- return false;
- }
- return true;
- }
-
- }
-
- /**
- * Supported by one thread.
- * 1) If the first index is a delta we can just change it to an index. There is nowhere to apply the deletions.
- * 2) Merge indexes: combine indexes together according to the target index merge strategy. This is a trade-off to
- * make an optimised index but not spend too much time merging and optimising small merges.
- * 3) Apply next deletion set to indexes: apply the deletions for the first delta to all the other indexes. Deletes
- * can be applied with relative impunity; if any are applied they take effect as required.
- * 1), 2) and 3) are mutually exclusive - try them in order.
- * This could be supported in another thread:
- * 4) Merge deltas: merge two index deltas together, starting at the end. Several merges can be going on at once.
- * a) Find merge b) Set state c) Apply deletions to the previous delta d) Update state e) Add deletions to the
- * previous delta deletion list f) Update state
- */
-
- private enum MergeAction
- {
- NONE, MERGE_INDEX, APPLY_DELTA_DELETION, MERGE_DELTA
- }
-
- private enum ScheduledState
- {
- UN_SCHEDULED, SCHEDULED, FAILED, RECOVERY_SCHEDULED
- }
-
- private enum ExitState
- {
- DONE, RESCHEDULE;
- }
-
- private abstract class AbstractSchedulable implements Schedulable, Runnable
- {
- ScheduledState scheduledState = ScheduledState.UN_SCHEDULED;
-
- public synchronized void schedule()
- {
- switch (scheduledState)
- {
- case FAILED:
- scheduledState = ScheduledState.RECOVERY_SCHEDULED;
- threadPoolExecutor.execute(this);
- break;
- case UN_SCHEDULED:
- scheduledState = ScheduledState.SCHEDULED;
- threadPoolExecutor.execute(this);
- break;
- case RECOVERY_SCHEDULED:
- case SCHEDULED:
- default:
- // Nothing to do
- break;
- }
- }
-
- synchronized void done()
- {
- switch (scheduledState)
- {
- case RECOVERY_SCHEDULED:
- case SCHEDULED:
- scheduledState = ScheduledState.UN_SCHEDULED;
- break;
- case FAILED:
- case UN_SCHEDULED:
- default:
- throw new IllegalStateException();
- }
- }
-
- private synchronized void rescheduleRecovery()
- {
- switch (scheduledState)
- {
- case RECOVERY_SCHEDULED:
- threadPoolExecutor.execute(this);
- break;
- case SCHEDULED:
- case FAILED:
- case UN_SCHEDULED:
- default:
- throw new IllegalStateException();
- }
- }
-
- private synchronized void fail()
- {
- switch (scheduledState)
- {
- case RECOVERY_SCHEDULED:
- case SCHEDULED:
- scheduledState = ScheduledState.FAILED;
- break;
- case FAILED:
- case UN_SCHEDULED:
- default:
- throw new IllegalStateException();
- }
- }
-
- public void run()
- {
- try
- {
- switch (scheduledState)
- {
- case RECOVERY_SCHEDULED:
- ExitState reschedule = recoverImpl();
- s_logger.error(getLogName() + " has recovered - resuming ... ");
- if (reschedule == ExitState.RESCHEDULE)
- {
- rescheduleRecovery();
- break;
- }
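- // Intentional fall-through: after a successful recovery, continue straight into the normal scheduled run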
- case SCHEDULED:
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(getLogName() + " running ... ");
- }
- runImpl();
- done();
- break;
- case FAILED:
- case UN_SCHEDULED:
- default:
- throw new IllegalStateException();
- }
- }
- catch (Throwable t)
- {
- try
- {
- if (s_logger.isWarnEnabled())
- {
- s_logger.warn(getLogName() + " failed with ", t);
- }
- recoverImpl();
- if (s_logger.isWarnEnabled())
- {
- s_logger.warn(getLogName() + " recovered from ", t);
- }
- done();
- }
- catch (Throwable rbt)
- {
- fail();
- s_logger.error(getLogName() + " failed to recover - suspending ", rbt);
- }
- }
- }
-
- abstract void runImpl() throws Exception;
-
- abstract ExitState recoverImpl() throws Exception;
-
- abstract String getLogName();
- }
-
- private class Merger extends AbstractSchedulable
- {
- String getLogName()
- {
- return "Index merger";
- }
-
- @Override
- void done()
- {
- // Reschedule if we need to, based on the current index state, that may have changed since we last got the
- // read lock
- getReadLock();
- try
- {
- synchronized (this)
- {
- if (decideMergeAction() != MergeAction.NONE)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(getLogName() + " rescheduling ... ");
- }
- switch (scheduledState)
- {
- case RECOVERY_SCHEDULED:
- scheduledState = ScheduledState.SCHEDULED;
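- // Intentional fall-through: once recovered, reschedule as a normal run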
- case SCHEDULED:
- threadPoolExecutor.execute(this);
- break;
- case FAILED:
- case UN_SCHEDULED:
- default:
- throw new IllegalStateException();
- }
- }
- else
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(getLogName() + " done ");
- }
- super.done();
- }
- }
- }
- finally
- {
- releaseReadLock();
- }
- }
-
- void runImpl() throws IOException
- {
-
- // Get the read lock to decide what to do
- // Single JVM to start with
- MergeAction action;
-
- getReadLock();
- try
- {
- if (indexIsShared && !checkVersion())
- {
- releaseReadLock();
- getWriteLock();
- try
- {
- // Sync with disk image if required
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- return null;
- }
-
- public boolean canRetry()
- {
- return true;
- }
- });
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
- }
-
- action = decideMergeAction();
- }
-
- catch (IOException e)
- {
- s_logger.error("Error reading index file", e);
- return;
- }
- finally
- {
- releaseReadLock();
- }
-
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug(getLogName() + " Merger applying MergeAction." + action.toString());
- }
- if (action == MergeAction.APPLY_DELTA_DELETION)
- {
- mergeDeletions();
- }
- else if (action == MergeAction.MERGE_INDEX)
- {
- mergeIndexes();
- }
- if (s_logger.isDebugEnabled())
- {
- dumpInfo();
- }
- }
-
- private MergeAction decideMergeAction()
- {
- MergeAction action = MergeAction.NONE;
- int indexes = 0;
- boolean mergingIndexes = false;
- int deltas = 0;
- boolean applyingDeletions = false;
-
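- // Count full indexes and committed deltas, and note whether a merge or a deletion pass is already under way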
- for (IndexEntry entry : indexEntries.values())
- {
- if (entry.getType() == IndexType.INDEX)
- {
- indexes++;
- if ((entry.getStatus() == TransactionStatus.MERGE) || (entry.getStatus() == TransactionStatus.MERGE_TARGET))
- {
- mergingIndexes = true;
- }
- }
- else if (entry.getType() == IndexType.DELTA)
- {
- if (entry.getStatus() == TransactionStatus.COMMITTED)
- {
- deltas++;
- }
- if (entry.getStatus() == TransactionStatus.COMMITTED_DELETING)
- {
- applyingDeletions = true;
- deltas++;
- }
- }
- }
-
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Indexes = " + indexes);
- s_logger.debug("Merging = " + mergingIndexes);
- s_logger.debug("Deltas = " + deltas);
- s_logger.debug("Deleting = " + applyingDeletions);
- }
-
- if (!mergingIndexes && !applyingDeletions)
- {
- if (indexes > mergerTargetIndexes)
- {
- // Try merge
- action = MergeAction.MERGE_INDEX;
- }
- else if (deltas > mergerTargetOverlays)
- {
- // Try delete
- action = MergeAction.APPLY_DELTA_DELETION;
- }
- }
- return action;
- }
-
- ExitState recoverImpl()
- {
- getWriteLock();
- try
- {
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- setStatusFromFile();
-
- // If the index is not shared we can do some easy clean
- // up
- if (!indexIsShared)
- {
- HashSet<String> deletable = new HashSet<String>();
- // clean up
- for (IndexEntry entry : indexEntries.values())
- {
- switch (entry.getStatus())
- {
- // states which can be deleted
- // We could check prepared states can be
- // committed.
- case ACTIVE:
- case MARKED_ROLLBACK:
- case NO_TRANSACTION:
- case PREPARING:
- case ROLLEDBACK:
- case ROLLINGBACK:
- case UNKNOWN:
- case PREPARED:
- case DELETABLE:
- case COMMITTING:
- case COMMITTED:
- default:
- if (s_logger.isInfoEnabled())
- {
- s_logger.info("Roll back merge: leaving index entry " + entry);
- }
- break;
- // States which are in mid-transition which we
- // can roll back to the committed state
- case COMMITTED_DELETING:
- case MERGE:
- if (s_logger.isInfoEnabled())
- {
- s_logger.info("Roll back merge: Resetting merge and committed_deleting to committed " + entry);
- }
- entry.setStatus(TransactionStatus.COMMITTED);
- break;
- case MERGE_TARGET:
- if (s_logger.isInfoEnabled())
- {
- s_logger.info("Roll back merge: Deleting merge target " + entry);
- }
- entry.setStatus(TransactionStatus.DELETABLE);
- deletable.add(entry.getName());
- break;
- }
-
- // Check we have a reader registered
- if (referenceCountingReadOnlyIndexReaders.get(entry.getName()) == null)
- {
- registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName(), entry.getDocumentCount()));
- }
- }
-
- if (mainIndexReader != null)
- {
- ReferenceCounting rcMain = (ReferenceCounting) mainIndexReader;
- if (rcMain.isInvalidForReuse())
- {
- mainIndexReader = null;
- }
- }
-
- // Delete entries that are not required
- invalidateMainReadersFromFirst(deletable);
- for (String id : deletable)
- {
- indexEntries.remove(id);
- }
- clearOldReaders();
-
- cleaner.schedule();
-
- // persist the new state
- writeStatus();
- }
- return null;
- }
-
- public boolean canRetry()
- {
- return false;
- }
-
- });
- }
- finally
- {
- releaseWriteLock();
- }
- return ExitState.DONE;
- }
-
- void mergeDeletions() throws IOException
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Deleting ...");
- }
-
- // lock for deletions
- final LinkedHashMap<String, IndexEntry> toDelete;
- LinkedHashMap<String, IndexEntry> indexes;
-
- getWriteLock();
- try
- {
- toDelete = doWithFileLock(new LockWork<LinkedHashMap<String, IndexEntry>>()
- {
- public LinkedHashMap<String, IndexEntry> doWork() throws Exception
- {
- LinkedHashMap<String, IndexEntry> set = new LinkedHashMap<String, IndexEntry>();
-
- for (IndexEntry entry : indexEntries.values())
- {
- if ((entry.getType() == IndexType.INDEX) && (entry.getStatus() == TransactionStatus.MERGE))
- {
- return set;
- }
- if ((entry.getType() == IndexType.INDEX) && (entry.getStatus() == TransactionStatus.MERGE_TARGET))
- {
- return set;
- }
- if ((entry.getType() == IndexType.DELTA) && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
- {
- return set;
- }
- }
- // Check it is not deleting
- BREAK: for (IndexEntry entry : indexEntries.values())
- {
- // skip indexes at the start
- if (entry.getType() == IndexType.DELTA)
- {
- if (entry.getStatus() == TransactionStatus.COMMITTED)
- {
- entry.setStatus(TransactionStatus.COMMITTED_DELETING);
- set.put(entry.getName(), entry);
- }
- else
- {
- // If not committed we stop as we can not
- // span non committed.
- break BREAK;
- }
- }
- }
- if (set.size() > 0)
- {
- writeStatus();
- }
- return set;
-
- }
-
- public boolean canRetry()
- {
- return false;
- }
-
- });
- }
- finally
- {
- getReadLock();
- releaseWriteLock();
- }
-
- try
- {
- indexes = new LinkedHashMap<String, IndexEntry>();
- BREAK: for (IndexEntry entry : indexEntries.values())
- {
- if (entry.getStatus() == TransactionStatus.COMMITTED_DELETING)
- {
- break BREAK;
- }
- indexes.put(entry.getName(), entry);
- }
- }
- finally
- {
- releaseReadLock();
- }
-
- if (toDelete.size() == 0)
- {
- return;
- }
- // Build readers
-
- int size = 2 * (toDelete.size() + indexes.size());
- final HashSet<String> invalidIndexes = new HashSet<String>(size);
-
- final HashMap<String, Long> newIndexCounts = new HashMap<String, Long>(size);
-
- LinkedHashMap<String, IndexReader> readers = new LinkedHashMap<String, IndexReader>(size);
- for (IndexEntry currentDelete : toDelete.values())
- {
- Set<String> deletions = getDeletions(currentDelete.getName(), INDEX_INFO_DELETIONS);
- Set<String> containerDeletions = getDeletions(currentDelete.getName(), INDEX_INFO_CONTAINER_DELETIONS);
- if (!deletions.isEmpty())
- {
- for (String key : indexes.keySet())
- {
- IndexReader reader = getReferenceCountingIndexReader(key);
- Searcher searcher = new IndexSearcher(reader);
- try
- {
- for (String stringRef : deletions)
- {
- TermQuery query = new TermQuery(new Term("ID", stringRef));
- Hits hits = searcher.search(query);
- if (hits.length() > 0)
- {
- IndexReader writeableReader = readers.get(key);
- if (writeableReader == null)
- {
- File location = new File(indexDirectory, key).getCanonicalFile();
- if (IndexReader.indexExists(location))
- {
- writeableReader = IndexReader.open(location);
- }
- else
- {
- continue;
- }
- readers.put(key, writeableReader);
- }
-
- if (currentDelete.isDeletOnlyNodes() && !containerDeletions.contains(stringRef))
- {
- Searcher writeableSearcher = new IndexSearcher(writeableReader);
- hits = writeableSearcher.search(query);
- if (hits.length() > 0)
- {
- for (int i = 0; i < hits.length(); i++)
- {
- Document doc = hits.doc(i);
- // Exclude all containers except the root (which is also a node!)
- Field path = doc.getField("PATH");
- if (path == null || path.stringValue().length() == 0)
- {
- writeableReader.deleteDocument(hits.id(i));
- invalidIndexes.add(key);
- // There should only be one thing to
- // delete
- // break;
- }
- }
- }
- writeableSearcher.close();
- }
- else
- {
- int deletedCount = 0;
- try
- {
- deletedCount = writeableReader.deleteDocuments(new Term("ID", stringRef));
- }
- catch (IOException ioe)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("IO Error for " + key);
- throw ioe;
- }
- }
- if (deletedCount > 0)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Deleted " + deletedCount + " from " + key + " for id " + stringRef + " remaining docs " + writeableReader.numDocs());
- }
- invalidIndexes.add(key);
- }
- }
- }
- }
- }
- finally
- {
- searcher.close();
- }
- }
- }
- if (!containerDeletions.isEmpty())
- {
- for (String key : indexes.keySet())
- {
- IndexReader reader = getReferenceCountingIndexReader(key);
- Searcher searcher = new IndexSearcher(reader);
- try
- {
- for (String stringRef : containerDeletions)
- {
- TermQuery query = new TermQuery(new Term("ANCESTOR", stringRef));
- Hits hits = searcher.search(query);
- if (hits.length() > 0)
- {
- IndexReader writeableReader = readers.get(key);
- if (writeableReader == null)
- {
- File location = new File(indexDirectory, key).getCanonicalFile();
- if (IndexReader.indexExists(location))
- {
- writeableReader = IndexReader.open(location);
- }
- else
- {
- continue;
- }
- readers.put(key, writeableReader);
- }
-
- int deletedCount = 0;
- try
- {
- deletedCount = writeableReader.deleteDocuments(new Term("ANCESTOR", stringRef));
- }
- catch (IOException ioe)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("IO Error for " + key);
- throw ioe;
- }
- }
- if (deletedCount > 0)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Deleted " + deletedCount + " from " + key + " for id " + stringRef + " remaining docs " + writeableReader.numDocs());
- }
- invalidIndexes.add(key);
- }
- }
- }
- }
- finally
- {
- searcher.close();
- }
- }
- }
- // The delta we have just processed now must be included when we process the deletions of its successor
- indexes.put(currentDelete.getName(), currentDelete);
- }
-
- // Close all readers holding the write lock - so no one tries to
- // read
- getWriteLock();
- try
- {
- for (String key : readers.keySet())
- {
- IndexReader reader = readers.get(key);
- // TODO:Set the new document count
- newIndexCounts.put(key, new Long(reader.numDocs()));
- reader.close();
- }
- }
- finally
- {
- releaseWriteLock();
- }
-
- // Prebuild all readers for affected indexes
- // Register them in the commit.
-
- final HashMap<String, IndexReader> newReaders = new HashMap<String, IndexReader>();
-
- for (String id : invalidIndexes)
- {
- IndexReader reader = buildReferenceCountingIndexReader(id, newIndexCounts.get(id));
- newReaders.put(id, reader);
- }
-
- getWriteLock();
- try
- {
- doWithFileLock(new LockWork<Object>()
- {
- public Object doWork() throws Exception
- {
- for (IndexEntry entry : toDelete.values())
- {
- entry.setStatus(TransactionStatus.COMMITTED);
- entry.setType(IndexType.INDEX);
- entry.setDeletions(0);
- }
-
- for (String key : newIndexCounts.keySet())
- {
- Long newCount = newIndexCounts.get(key);
- IndexEntry entry = indexEntries.get(key);
- entry.setDocumentCount(newCount);
- }
-
- writeStatus();
-
- for (String id : invalidIndexes)
- {
- IndexReader reader = referenceCountingReadOnlyIndexReaders.remove(id);
- if (reader != null)
- {
- ReferenceCounting referenceCounting = (ReferenceCounting) reader;
- referenceCounting.setInvalidForReuse();
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("... invalidating sub reader after applying deletions" + id);
- }
- }
- }
- for (String id : invalidIndexes)
- {
- IndexReader newReader = newReaders.get(id);
- registerReferenceCountingIndexReader(id, newReader);
- }
-
- // Invalidate all main index readers from the first invalid index onwards
- invalidateMainReadersFromFirst(invalidIndexes);
-
-
- if (s_logger.isDebugEnabled())
- {
- for (String id : toDelete.keySet())
- {
- s_logger.debug("...applied deletion for " + id);
- }
- s_logger.debug("...deleting done");
- }
-
- dumpInfo();
-
- notifyListeners("MergedDeletions", toDelete.size());
-
- return null;
- }
-
- public boolean canRetry()
- {
- return false;
- }
-
- });
-
- }
- finally
- {
- releaseWriteLock();
- }
- }
-
- void mergeIndexes() throws IOException
- {
-
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Merging...");
- }
-
- final LinkedHashMap<String, IndexEntry>