1
0
Fork 0
mirror of https://github.com/eclipse-cdt/cdt synced 2025-04-29 19:45:01 +02:00

Indexer performance with long include search path, bug 225302.

This commit is contained in:
Markus Schorn 2008-04-03 18:07:34 +00:00
parent 6a73a387be
commit fa6684b982
4 changed files with 421 additions and 28 deletions

View file

@ -35,17 +35,19 @@ import org.eclipse.cdt.core.parser.ICodeReaderCache;
import org.eclipse.cdt.core.parser.ParserUtil;
import org.eclipse.cdt.internal.core.parser.scanner.IIndexBasedCodeReaderFactory;
import org.eclipse.cdt.internal.core.parser.scanner.IncludeFileContent;
import org.eclipse.cdt.internal.core.parser.scanner.IncludeFileResolutionCache;
import org.eclipse.cdt.internal.core.parser.scanner.IncludeFileContent.InclusionKind;
import org.eclipse.cdt.internal.core.pdom.ASTFilePathResolver;
import org.eclipse.cdt.internal.core.pdom.AbstractIndexerTask;
import org.eclipse.cdt.internal.core.pdom.AbstractIndexerTask.FileContent;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IAdaptable;
/**
* Code reader factory, that fakes code readers for header files already stored in the
* index.
*/
public final class IndexBasedCodeReaderFactory implements IIndexBasedCodeReaderFactory {
public final class IndexBasedCodeReaderFactory implements IIndexBasedCodeReaderFactory, IAdaptable {
private static final class NeedToParseException extends Exception {}
private final IIndex fIndex;
@ -55,6 +57,7 @@ public final class IndexBasedCodeReaderFactory implements IIndexBasedCodeReaderF
private final ICodeReaderFactory fFallBackFactory;
private final ASTFilePathResolver fPathResolver;
private final AbstractIndexerTask fRelatedIndexerTask;
private final IncludeFileResolutionCache fIncludeFileResolutionCache;
public IndexBasedCodeReaderFactory(IIndex index, ASTFilePathResolver pathResolver, int linkage,
ICodeReaderFactory fallbackFactory) {
@ -68,6 +71,7 @@ public final class IndexBasedCodeReaderFactory implements IIndexBasedCodeReaderF
fPathResolver= pathResolver;
fRelatedIndexerTask= relatedIndexerTask;
fLinkage= linkage;
fIncludeFileResolutionCache= new IncludeFileResolutionCache(1024);
}
public int getUniqueIdentifier() {
@ -179,4 +183,12 @@ public final class IndexBasedCodeReaderFactory implements IIndexBasedCodeReaderF
/**
 * Switches the linkage this code-reader factory resolves files for.
 * @param linkageID the new linkage ID (presumably one of the IDs defined by
 *     ILinkage — confirm against callers)
 */
public void setLinkage(int linkageID) {
fLinkage= linkageID;
}
/**
 * IAdaptable implementation: exposes the shared include-file resolution cache
 * so the preprocessor can reuse it across translation units.
 * @param adapter the requested adapter type
 * @return the cache when {@code IncludeFileResolutionCache} (or a supertype of it)
 *     is requested, otherwise {@code null}
 */
@SuppressWarnings("unchecked")
public Object getAdapter(Class adapter) {
if (adapter.isInstance(fIncludeFileResolutionCache)) {
return fIncludeFileResolutionCache;
}
return null;
}
}

View file

@ -45,6 +45,8 @@ import org.eclipse.cdt.core.parser.util.CharArrayIntMap;
import org.eclipse.cdt.core.parser.util.CharArrayMap;
import org.eclipse.cdt.core.parser.util.CharArrayUtils;
import org.eclipse.cdt.internal.core.parser.scanner.ExpressionEvaluator.EvalException;
import org.eclipse.cdt.internal.core.parser.scanner.IncludeFileResolutionCache.ISPKey;
import org.eclipse.cdt.internal.core.parser.scanner.IncludeFileResolutionCache.LookupKey;
import org.eclipse.cdt.internal.core.parser.scanner.Lexer.LexerOptions;
import org.eclipse.cdt.internal.core.parser.scanner.MacroDefinitionParser.InvalidMacroDefinitionException;
import org.eclipse.core.runtime.IAdaptable;
@ -85,18 +87,18 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
private static final DynamicMacro __TIME__ = new TimeMacro("__TIME__".toCharArray()); //$NON-NLS-1$
private static final DynamicMacro __LINE__ = new LineMacro("__LINE__".toCharArray()); //$NON-NLS-1$
private interface IIncludeFileTester {
Object checkFile(String path, String fileName);
private interface IIncludeFileTester<T> {
T checkFile(String path, String fileName);
}
final private IIncludeFileTester createCodeReaderTester= new IIncludeFileTester() {
public Object checkFile(String path, String fileName) {
final private IIncludeFileTester<IncludeFileContent> createCodeReaderTester= new IIncludeFileTester<IncludeFileContent>() {
public IncludeFileContent checkFile(String path, String fileName) {
return createReader(path, fileName);
}
};
final private IIncludeFileTester createPathTester= new IIncludeFileTester() {
public Object checkFile(String path, String fileName) {
final private IIncludeFileTester<String> createPathTester= new IIncludeFileTester<String>() {
public String checkFile(String path, String fileName) {
path= ScannerUtility.createReconciledPath(path, fileName);
if (new File(path).exists()) {
return path;
@ -119,6 +121,9 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
final private String[] fIncludePaths;
final private String[] fQuoteIncludePaths;
private String[][] fPreIncludedFiles= null;
private final IncludeFileResolutionCache fIncludeResolutionCache;
private final ISPKey fIncludePathKey;
private final ISPKey fQuoteIncludePathKey;
private int fContentAssistLimit= -1;
private boolean fHandledCompletion= false;
@ -157,6 +162,10 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
fMacroExpander= new MacroExpander(this, fMacroDictionary, fLocationMap, fLexOptions);
fCodeReaderFactory= wrapReaderFactory(readerFactory);
fIncludeResolutionCache= getIncludeResolutionCache(readerFactory);
fIncludePathKey= fIncludeResolutionCache.getKey(fIncludePaths);
fQuoteIncludePathKey= fIncludeResolutionCache.getKey(fQuoteIncludePaths);
setupMacroDictionary(configuration, info, language);
final String filePath= new String(reader.filename);
@ -170,7 +179,17 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
}
}
private IIndexBasedCodeReaderFactory wrapReaderFactory(final ICodeReaderFactory readerFactory) {
/**
 * Obtains an include-file resolution cache to share with the reader factory.
 * If the factory is adaptable and provides an IncludeFileResolutionCache, that
 * shared instance is used; otherwise a private cache is created.
 */
private IncludeFileResolutionCache getIncludeResolutionCache(ICodeReaderFactory readerFactory) {
if (readerFactory instanceof IAdaptable) {
IncludeFileResolutionCache cache= (IncludeFileResolutionCache) ((IAdaptable) readerFactory).getAdapter(IncludeFileResolutionCache.class);
if (cache != null) {
return cache;
}
}
// no shared cache available: fall back to a local cache (argument is the size in KB)
return new IncludeFileResolutionCache(1024);
}
private IIndexBasedCodeReaderFactory wrapReaderFactory(final ICodeReaderFactory readerFactory) {
if (readerFactory instanceof IIndexBasedCodeReaderFactory) {
return (IIndexBasedCodeReaderFactory) readerFactory;
}
@ -301,7 +320,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
fCurrentContext= new ScannerContext(ctx, fCurrentContext, new Lexer(buffer, fLexOptions, this, this));
ScannerContext preCtx= fCurrentContext;
try {
while(internalFetchToken(true, false, true, preCtx).getType() != IToken.tEND_OF_INPUT) {
while(internalFetchToken(true, false, false, true, preCtx).getType() != IToken.tEND_OF_INPUT) {
// just eat the tokens
}
final ILocationCtx locationCtx = fCurrentContext.getLocationCtx();
@ -385,7 +404,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
}
try {
t= internalFetchToken(true, false, true, fRootContext);
t= internalFetchToken(true, false, false, true, fRootContext);
} catch (OffsetLimitReachedException e) {
fHandledCompletion= true;
throw e;
@ -514,7 +533,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
buf.append(image, start, image.length-start-1);
}
Token internalFetchToken(final boolean expandMacros, final boolean stopAtNewline,
Token internalFetchToken(final boolean expandMacros, final boolean isPPCondition, final boolean stopAtNewline,
final boolean checkNumbers, final ScannerContext uptoEndOfCtx) throws OffsetLimitReachedException {
Token ppToken= fCurrentContext.currentLexerToken();
while(true) {
@ -563,7 +582,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
fCurrentContext.nextPPToken(); // consume the identifier
if (expandMacros) {
final Lexer lexer= fCurrentContext.getLexer();
if (lexer != null && expandMacro(ppToken, lexer, stopAtNewline)) {
if (lexer != null && expandMacro(ppToken, lexer, stopAtNewline, isPPCondition)) {
ppToken= fCurrentContext.currentLexerToken();
continue;
}
@ -718,12 +737,12 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
private IncludeFileContent findInclusion(final String filename, final boolean quoteInclude,
final boolean includeNext, final File currentDir) {
return (IncludeFileContent) findInclusion(filename, quoteInclude, includeNext, currentDir, createCodeReaderTester);
return findInclusion(filename, quoteInclude, includeNext, currentDir, createCodeReaderTester);
}
private Object findInclusion(final String filename, final boolean quoteInclude,
final boolean includeNext, final File currentDirectory, final IIncludeFileTester tester) {
Object reader = null;
private <T> T findInclusion(final String filename, final boolean quoteInclude,
final boolean includeNext, final File currentDirectory, final IIncludeFileTester<T> tester) {
T reader = null;
// filename is an absolute path or it is a Linux absolute path on a windows machine
if (new File(filename).isAbsolute() || filename.startsWith("/")) { //$NON-NLS-1$
return tester.checkFile( EMPTY_STRING, filename );
@ -740,18 +759,46 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
// if we're not include_next, then we are looking for the first occurrence of
// the file, otherwise, we ignore all the paths before the current directory
String[] includePathsToUse = quoteInclude ? fQuoteIncludePaths : fIncludePaths;
if (includePathsToUse != null ) {
int startpos = 0;
String[] isp;
ISPKey ispKey;
if (quoteInclude) {
isp= fQuoteIncludePaths;
ispKey= fQuoteIncludePathKey;
}
else {
isp= fIncludePaths;
ispKey= fIncludePathKey;
}
if (isp != null ) {
if (includeNext && currentDirectory != null) {
startpos = findIncludePos(includePathsToUse, currentDirectory) + 1;
final int startpos = findIncludePos(isp, currentDirectory) + 1;
for (int i= startpos; i < isp.length; ++i) {
reader= tester.checkFile(isp[i], filename);
if (reader != null) {
return reader;
}
}
return null;
}
for (int i = startpos; i < includePathsToUse.length; ++i) {
reader = tester.checkFile(includePathsToUse[i], filename);
final LookupKey lookupKey= fIncludeResolutionCache.getKey(ispKey, filename.toCharArray());
Integer offset= fIncludeResolutionCache.getCachedPathOffset(lookupKey);
if (offset != null) {
if (offset < 0) {
return null;
}
return tester.checkFile(isp[offset], filename);
}
for (int i= 0; i < isp.length; ++i) {
reader = tester.checkFile(isp[i], filename);
if (reader != null) {
fIncludeResolutionCache.putCachedPathOffset(lookupKey, i);
return reader;
}
}
fIncludeResolutionCache.putCachedPathOffset(lookupKey, -1);
}
return null;
}
@ -1000,7 +1047,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
if (!active) {
// test if the include is inactive just because it was included before (bug 167100)
final File currentDir= userInclude || include_next ? new File(String.valueOf(getCurrentFilename())).getParentFile() : null;
final String resolved= (String) findInclusion(new String(headerName), userInclude, include_next, currentDir, createPathTester);
final String resolved= findInclusion(new String(headerName), userInclude, include_next, currentDir, createPathTester);
if (resolved != null && fCodeReaderFactory.hasFileBeenIncludedInCurrentTranslationUnit(resolved)) {
path= resolved;
}
@ -1185,7 +1232,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
final ScannerContext scannerCtx= fCurrentContext;
boolean expandMacros= true;
loop: while(true) {
Token t= internalFetchToken(expandMacros, true, false, scannerCtx);
Token t= internalFetchToken(expandMacros, isCondition, true, false, scannerCtx);
switch(t.getType()) {
case IToken.tEND_OF_INPUT:
case IToken.tCOMPLETION:
@ -1340,8 +1387,10 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
* @param identifier the token where macro expansion may occur.
* @param lexer the input for the expansion.
* @param stopAtNewline whether or not tokens to be read are limited to the current line.
* @param isPPCondition whether the expansion is inside of a preprocessor condition. This
* implies a specific handling for the defined token.
*/
private boolean expandMacro(final Token identifier, Lexer lexer, boolean stopAtNewline) throws OffsetLimitReachedException {
private boolean expandMacro(final Token identifier, Lexer lexer, boolean stopAtNewline, final boolean isPPCondition) throws OffsetLimitReachedException {
final char[] name= identifier.getCharImage();
PreprocessorMacro macro= fMacroDictionary.get(name);
if (macro == null) {
@ -1360,7 +1409,7 @@ public class CPreprocessor implements ILexerLog, IScanner, IAdaptable {
}
}
final boolean contentAssist = fContentAssistLimit>=0 && fCurrentContext == fRootContext;
TokenList replacement= fMacroExpander.expand(lexer, stopAtNewline, macro, identifier, contentAssist);
TokenList replacement= fMacroExpander.expand(lexer, stopAtNewline, isPPCondition, macro, identifier, contentAssist);
final IASTName[] expansions= fMacroExpander.clearImplicitExpansions();
final ImageLocationInfo[] ili= fMacroExpander.clearImageLocationInfos();
final Token last= replacement.last();

View file

@ -0,0 +1,107 @@
/*******************************************************************************
* Copyright (c) 2008 Wind River Systems, Inc. and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Markus Schorn - initial API and implementation
*******************************************************************************/
package org.eclipse.cdt.internal.core.parser.scanner;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;
import org.eclipse.cdt.internal.core.parser.util.WeakHashSet;
/**
* A limited LRU cache for looking up files in an include search path.
* @since 5.0
*/
/**
 * A limited LRU cache for looking up files in an include search path (ISP).
 * Maps a (canonical ISP, include name) pair to the offset of the directory
 * within the ISP where the file was found, or a negative value for a cached
 * miss. Introduced for indexer performance with long include search paths
 * (bug 225302).
 * @since 5.0
 */
public final class IncludeFileResolutionCache {
	/**
	 * Canonical key for one include search path (an array of directories).
	 * Instances are canonicalized via a weak set so that equal paths share
	 * one key instance, allowing identity comparison in {@link LookupKey}.
	 */
	public static class ISPKey {
		private final String[] fISP;
		private final int fHashCode;

		private ISPKey(String[] isp) {
			fISP= isp;
			fHashCode= Arrays.hashCode(isp);
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			// Guard against foreign types: the previous unchecked cast threw
			// ClassCastException when compared against a non-ISPKey object,
			// violating the Object.equals contract.
			if (!(obj instanceof ISPKey)) {
				return false;
			}
			return Arrays.equals(fISP, ((ISPKey) obj).fISP);
		}

		@Override
		public int hashCode() {
			return fHashCode;
		}
	}

	/** Key for a single lookup: a canonical ISP key plus the include file name. */
	public static class LookupKey {
		private final ISPKey fCanonicISP;
		private final char[] fName;
		private final int fHashCode;

		private LookupKey(ISPKey ispKey, char[] include) {
			fCanonicISP= ispKey;
			fName= include;
			fHashCode= Arrays.hashCode(include) * 31 + ispKey.hashCode();
		}

		@Override
		public int hashCode() {
			return fHashCode;
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			// Guard against null and foreign types: the previous unconditional
			// cast could throw ClassCastException or NullPointerException.
			if (!(obj instanceof LookupKey)) {
				return false;
			}
			final LookupKey other= (LookupKey) obj;
			// ISP keys are canonicalized (see getKey(String[])), so identity
			// comparison is sufficient and cheap.
			return fCanonicISP == other.fCanonicISP && Arrays.equals(fName, other.fName);
		}
	}

	private final WeakHashSet<ISPKey> fCanonicISPs;
	private final LinkedHashMap<LookupKey, Integer> fCache;

	/**
	 * Creates a cache for include file resolution using up to the given amount of memory.
	 * @param maxSizeKBytes the maximum size of the cache in kilobytes
	 */
	public IncludeFileResolutionCache(final int maxSizeKBytes) {
		// estimated per-entry footprint: HashEntry 32 bytes, Key 16 bytes, Name 16 bytes, Integer 8 bytes
		final int size= maxSizeKBytes * 1024 / 72;
		// access-ordered map: evicts the least recently used entry once 'size' is exceeded
		fCache= new LinkedHashMap<LookupKey, Integer>(size, 0.75f, true) {
			@Override
			protected boolean removeEldestEntry(Map.Entry<LookupKey, Integer> eldest) {
				return size() > size;
			}
		};
		fCanonicISPs= new WeakHashSet<ISPKey>();
	}

	/**
	 * Returns the canonical key for the given include search path; equal paths
	 * yield the identical key instance while it remains strongly referenced.
	 */
	public ISPKey getKey(String[] isp) {
		return fCanonicISPs.add(new ISPKey(isp));
	}

	/** Creates a lookup key for resolving {@code filename} against the given ISP. */
	public LookupKey getKey(ISPKey ispKey, char[] filename) {
		return new LookupKey(ispKey, filename);
	}

	/**
	 * Returns the cached offset into the ISP where the file was found, a
	 * negative value for a cached miss, or {@code null} when nothing is cached.
	 */
	public Integer getCachedPathOffset(LookupKey key) {
		return fCache.get(key);
	}

	/** Caches a resolution result; callers record misses with a negative offset. */
	public void putCachedPathOffset(LookupKey key, int offset) {
		fCache.put(key, offset);
	}
}

View file

@ -0,0 +1,225 @@
/*******************************************************************************
* Copyright (c) 2004, 2008 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
* Markus Schorn (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.internal.core.parser.util;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
/**
* A hashset whose values can be garbage collected.
* This is a clone of org.eclipse.jdt.internal.core.util.WeakHashSet.
*/
/**
 * A hashset whose values can be garbage collected.
 * Implemented as an open-addressed table (linear probing) of weak references;
 * collected entries are purged lazily via a reference queue at the start of
 * every public operation.
 * This is a clone of org.eclipse.jdt.internal.core.util.WeakHashSet.
 * Not thread-safe.
 */
public class WeakHashSet<T> {
	/**
	 * Weak reference that caches its referent's hash code so the table slot
	 * remains addressable after the referent has been collected.
	 */
	public static class HashableWeakReference<T> extends WeakReference<T> {
		public int hashCode;

		public HashableWeakReference(T referent, ReferenceQueue<T> queue) {
			super(referent, queue);
			this.hashCode = referent.hashCode();
		}

		@Override
		public boolean equals(Object obj) {
			if (!(obj instanceof HashableWeakReference)) return false;
			Object referent = get();
			Object other = ((HashableWeakReference<?>) obj).get();
			// two cleared references are considered equal
			if (referent == null) return other == null;
			return referent.equals(other);
		}

		@Override
		public int hashCode() {
			return this.hashCode;
		}

		@Override
		public String toString() {
			Object referent = get();
			if (referent == null) return "[hashCode=" + this.hashCode + "] <referent was garbage collected>"; //$NON-NLS-1$ //$NON-NLS-2$
			return "[hashCode=" + this.hashCode + "] " + referent.toString(); //$NON-NLS-1$ //$NON-NLS-2$
		}
	}

	HashableWeakReference<T>[] values;
	public int elementSize; // number of elements in the table
	int threshold;
	ReferenceQueue<T> referenceQueue = new ReferenceQueue<T>();

	public WeakHashSet() {
		this(5);
	}

	@SuppressWarnings("unchecked")
	public WeakHashSet(int size) {
		this.elementSize = 0;
		this.threshold = size; // size represents the expected number of elements
		int extraRoom = (int) (size * 1.75f);
		if (this.threshold == extraRoom)
			extraRoom++;
		this.values = new HashableWeakReference[extraRoom];
	}

	/*
	 * Adds the given object to this set.
	 * If an object that is equals to the given object already exists, do nothing.
	 * Returns the existing object or the new object if not found.
	 */
	public T add(T obj) {
		cleanupGarbageCollectedValues();
		int valuesLength = this.values.length,
			index = (obj.hashCode() & 0x7FFFFFFF) % valuesLength;
		HashableWeakReference<T> currentValue;
		// linear probe until an empty slot or an equal referent is found
		while ((currentValue = this.values[index]) != null) {
			T referent;
			if (obj.equals(referent = currentValue.get())) {
				return referent;
			}
			if (++index == valuesLength) {
				index = 0;
			}
		}
		this.values[index] = new HashableWeakReference<T>(obj, this.referenceQueue);
		// assumes the threshold is never equal to the size of the table
		if (++this.elementSize > this.threshold)
			rehash();
		return obj;
	}

	// Re-inserts an existing reference during rehash; skips cleared references.
	private void addValue(HashableWeakReference<T> value) {
		T obj = value.get();
		if (obj == null) return;
		int valuesLength = this.values.length;
		int index = (value.hashCode & 0x7FFFFFFF) % valuesLength;
		HashableWeakReference<T> currentValue;
		while ((currentValue = this.values[index]) != null) {
			if (obj.equals(currentValue.get())) {
				return;
			}
			if (++index == valuesLength) {
				index = 0;
			}
		}
		this.values[index] = value;
		// assumes the threshold is never equal to the size of the table
		if (++this.elementSize > this.threshold)
			rehash();
	}

	// Drains the reference queue and removes the slots of collected referents,
	// compacting each probe run so later lookups still find their entries.
	@SuppressWarnings("unchecked")
	private void cleanupGarbageCollectedValues() {
		HashableWeakReference<T> toBeRemoved;
		while ((toBeRemoved = (HashableWeakReference<T>) this.referenceQueue.poll()) != null) {
			int hashCode = toBeRemoved.hashCode;
			int valuesLength = this.values.length;
			int index = (hashCode & 0x7FFFFFFF) % valuesLength;
			HashableWeakReference<T> currentValue;
			while ((currentValue = this.values[index]) != null) {
				if (currentValue == toBeRemoved) {
					// replace the value at index with the last value with the same hash
					int sameHash = index;
					int current;
					while ((currentValue = this.values[current = (sameHash + 1) % valuesLength]) != null && currentValue.hashCode == hashCode)
						sameHash = current;
					this.values[index] = this.values[sameHash];
					this.values[sameHash] = null;
					this.elementSize--;
					break;
				}
				if (++index == valuesLength) {
					index = 0;
				}
			}
		}
	}

	public boolean contains(T obj) {
		return get(obj) != null;
	}

	/*
	 * Return the object that is in this set and that is equals to the given object.
	 * Return null if not found.
	 */
	public T get(T obj) {
		cleanupGarbageCollectedValues();
		int valuesLength = this.values.length;
		int index = (obj.hashCode() & 0x7FFFFFFF) % valuesLength;
		HashableWeakReference<T> currentValue;
		while ((currentValue = this.values[index]) != null) {
			T referent;
			if (obj.equals(referent = currentValue.get())) {
				return referent;
			}
			if (++index == valuesLength) {
				index = 0;
			}
		}
		return null;
	}

	// Grows the table to twice the current element count and re-inserts all
	// live references (cleared ones are dropped by addValue).
	private void rehash() {
		WeakHashSet<T> newHashSet = new WeakHashSet<T>(this.elementSize * 2); // double the number of expected elements
		newHashSet.referenceQueue = this.referenceQueue;
		HashableWeakReference<T> currentValue;
		for (int i = 0, length = this.values.length; i < length; i++)
			if ((currentValue = this.values[i]) != null)
				newHashSet.addValue(currentValue);
		this.values = newHashSet.values;
		this.threshold = newHashSet.threshold;
		this.elementSize = newHashSet.elementSize;
	}

	/*
	 * Removes the object that is in this set and that is equals to the given object.
	 * Return the object that was in the set, or null if not found.
	 */
	public T remove(T obj) {
		cleanupGarbageCollectedValues();
		int valuesLength = this.values.length;
		int index = (obj.hashCode() & 0x7FFFFFFF) % valuesLength;
		HashableWeakReference<T> currentValue;
		while ((currentValue = this.values[index]) != null) {
			T referent;
			if (obj.equals(referent = currentValue.get())) {
				this.elementSize--;
				this.values[index] = null;
				// rehash to close the gap left in the probe sequence
				rehash();
				return referent;
			}
			if (++index == valuesLength) {
				index = 0;
			}
		}
		return null;
	}

	public int size() {
		return this.elementSize;
	}

	@Override
	public String toString() {
		// local, single-threaded buffer: StringBuilder avoids StringBuffer's
		// pointless per-call synchronization
		StringBuilder buffer = new StringBuilder("{"); //$NON-NLS-1$
		for (int i = 0, length = this.values.length; i < length; i++) {
			HashableWeakReference<T> value = this.values[i];
			if (value != null) {
				Object ref = value.get();
				if (ref != null) {
					buffer.append(ref.toString());
					buffer.append(", "); //$NON-NLS-1$
				}
			}
		}
		buffer.append("}"); //$NON-NLS-1$
		return buffer.toString();
	}
}