Mirror of https://github.com/eclipse-cdt/cdt, synced 2025-04-29 19:45:01 +02:00.
Do not reindex files if their contents haven't changed. Bug 302083.
This commit is contained in:
parent 02335bb8a6
commit 351cb70ef4
22 changed files with 495 additions and 60 deletions
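The behavioral core of the change is the new isModified() check in AbstractIndexerTask (see its hunk below): when the new UPDATE_CHECK_CONTENTS_HASH flag is set, a changed timestamp alone no longer forces re-indexing; the stored contents hash has to differ as well. A minimal sketch of the rule, with hypothetical fields standing in for the resolver and index lookups the real method makes:

    // Sketch only; the four fields are hypothetical stand-ins for
    // fResolver.getLastModified(), IIndexFile.getTimestamp(),
    // FileContent.getContentsHash() and IIndexFile.getContentsHash().
    final class ReindexRule {
        long lastModified;        // timestamp reported by the file system
        long indexedTimestamp;    // timestamp stored in the index
        long contentsHash;        // StreamHasher hash of the current contents
        long indexedContentsHash; // hash stored in the index

        boolean needsReindex(boolean checkTimestamps, boolean checkContentsHash) {
            boolean timestampDifferent = checkTimestamps && lastModified != indexedTimestamp;
            if (timestampDifferent && checkContentsHash && contentsHash == indexedContentsHash) {
                return false; // touched but unchanged, e.g. a regenerated file
            }
            return timestampDifferent;
        }
    }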
@@ -25,6 +25,7 @@ public class ScannerTestSuite extends TestSuite {
 		suite.addTest(PreprocessorBugsTests.suite());
 		suite.addTest(ExpansionExplorerTests.suite());
 		suite.addTest(InactiveCodeTests.suite());
+		suite.addTest(StreamHasherTests.suite());
 		return suite;
 	}
 }
@@ -0,0 +1,90 @@
+/*******************************************************************************
+ * Copyright (c) 2010 Google, Inc and others.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     Sergey Prigogin (Google) - initial API and implementation
+ *******************************************************************************/
+package org.eclipse.cdt.core.parser.tests.scanner;
+
+import junit.framework.TestSuite;
+
+import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
+import org.eclipse.cdt.internal.core.parser.scanner.StreamHasher;
+
+/**
+ * Unit tests for the StreamHasher class.
+ */
+public class StreamHasherTests extends BaseTestCase {
+
+	private static final String TEXT =
+			"'Twas brillig, and the slithy toves\r\n" +
+			"Did gyre and gimble in the wabe;\r\n" +
+			"All mimsy were the borogoves,\r\n" +
+			"And the mome raths outgrabe.\r\n" +
+			"\r\n" +
+			"\"Beware the Jabberwock, my son!\r\n" +
+			"The jaws that bite, the claws that catch!\r\n" +
+			"Beware the Jubjub bird, and shun\r\n" +
+			"The frumious Bandersnatch!\"\r\n" +
+			"\r\n" +
+			"He took his vorpal sword in hand:\r\n" +
+			"Long time the manxome foe he sought—\r\n" +
+			"So rested he by the Tumtum tree,\r\n" +
+			"And stood awhile in thought.\r\n" +
+			"\r\n" +
+			"And as in uffish thought he stood,\r\n" +
+			"The Jabberwock, with eyes of flame,\r\n" +
+			"Came whiffling through the tulgey wood,\r\n" +
+			"And burbled as it came!\r\n" +
+			"\r\n" +
+			"One, two! One, two! and through and through\r\n" +
+			"The vorpal blade went snicker-snack!\r\n" +
+			"He left it dead, and with its head\r\n" +
+			"He went galumphing back.\r\n" +
+			"\r\n" +
+			"\"And hast thou slain the Jabberwock?\r\n" +
+			"Come to my arms, my beamish boy!\r\n" +
+			"O frabjous day! Callooh! Callay!\"\r\n" +
+			"He chortled in his joy.\r\n" +
+			"\r\n" +
+			"'Twas brillig, and the slithy toves\r\n" +
+			"Did gyre and gimble in the wabe;\r\n" +
+			"All mimsy were the borogoves,\r\n" +
+			"And the mome raths outgrabe.\r\n";
+
+	public static TestSuite suite() {
+		return suite(StreamHasherTests.class);
+	}
+
+	public StreamHasherTests() {
+		super();
+	}
+
+	public StreamHasherTests(String name) {
+		super(name);
+	}
+
+	public void testEmpty() throws Exception {
+		// Verify that an empty string has a zero hash value.
+		assertEquals(0, StreamHasher.hash(""));
+		assertEquals(0, new StreamHasher().computeHash());
+	}
+
+	public void testChunks() throws Exception {
+		// Verify that the hash value does not depend on the partitioning of the character string into chunks.
+		long h = StreamHasher.hash(TEXT);
+		assertTrue(h != 0);
+		for (int chunkSize = 1; chunkSize <= 20; chunkSize++) {
+			StreamHasher hasher = new StreamHasher();
+			for (int offset = 0; offset < TEXT.length(); offset += chunkSize) {
+				char[] chunk = TEXT.substring(offset, Math.min(offset + chunkSize, TEXT.length())).toCharArray();
+				hasher.addChunk(chunk);
+			}
+			assertEquals(h, hasher.computeHash());
+		}
+	}
+}
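The chunk-splitting invariance that testChunks() verifies is exactly what the indexer relies on further down in this commit, where LazyCharArray feeds the hasher one chunk at a time as a file is read. A small illustration using the API above (a hedged sketch, not part of the commit):

    static void chunkingDoesNotMatter() {
        long whole = StreamHasher.hash("Jabberwocky");
        StreamHasher hasher = new StreamHasher();
        hasher.addChunk("Jabber".toCharArray()); // any partitioning works
        hasher.addChunk("wocky".toCharArray());
        assert whole == hasher.computeHash();
    }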
@@ -91,7 +91,6 @@ public class IndexListenerTest extends BaseTestCase {
 		}
 	}
-
 
 	public void testChangeListener() throws Exception {
 		final Object mutex= new Object();
 		final List projects= new ArrayList();
@@ -119,8 +118,8 @@ public class IndexListenerTest extends BaseTestCase {
 		projects.clear();
 
 
-		IFile file1= TestSourceReader.createFile(fProject1.getProject(), "test.cpp", "int a;");
-		IFile file2= TestSourceReader.createFile(fProject2.getProject(), "test.cpp", "int b;");
+		IFile file1= TestSourceReader.createFile(fProject1.getProject(), "test.cpp", "int b;");
+		IFile file2= TestSourceReader.createFile(fProject2.getProject(), "test.cpp", "int c;");
 		synchronized (mutex) {
 			mutex.wait(1000);
 			if (projects.size() < 2) {
@@ -125,7 +125,10 @@ public class IndexUpdateTests extends IndexTestBase {
 	}
 
 	private void updateFile() throws Exception {
-		fFile= TestSourceReader.createFile(fFile.getParent(), fFile.getName(), fContents[++fContentUsed].toString());
+		// Append a variable comment to the end of the file to change its contents.
+		// The indexer would not reindex the file if its contents remained the same.
+		fFile= TestSourceReader.createFile(fFile.getParent(), fFile.getName(),
+				fContents[++fContentUsed].toString() + "\n// " + fContentUsed);
 		TestSourceReader.waitUntilFileIsIndexed(fIndex, fFile, INDEXER_WAIT_TIME);
 	}
 
@@ -60,6 +60,14 @@ public interface IIndexFile {
 	 */
 	long getTimestamp() throws CoreException;
 
+	/**
+	 * Hash of the file contents when the file was indexed.
+	 * @return a 64-bit hash of the file contents.
+	 * @throws CoreException
+	 * @since 5.2
+	 */
+	long getContentsHash() throws CoreException;
+
 	/**
 	 * Returns the hash-code of the scanner configuration that was used to parse the file.
 	 * <code>0</code> will be returned in case the hash-code is unknown.
@@ -79,6 +79,15 @@ public interface IIndexManager extends IPDOMManager {
 	 */
 	public final static int UPDATE_EXTERNAL_FILES_FOR_PROJECT= 0x8;
 
+	/**
+	 * This flag modifies the behavior of UPDATE_CHECK_TIMESTAMPS: both the timestamp and
+	 * the hash of the contents of a translation unit have to change to trigger re-indexing.
+	 * Checking for content changes may reduce indexing overhead for projects that use code
+	 * generation, since generated files are sometimes recreated with identical contents.
+	 * @since 5.2
+	 */
+	public final static int UPDATE_CHECK_CONTENTS_HASH= 0x10;
+
 	/**
 	 * Returns the index for the given project.
 	 * @param project the project to get the index for
@@ -192,7 +201,7 @@ public interface IIndexManager extends IPDOMManager {
 	 * nested translation units are considered.
 	 * @param tuSelection the translation units to update.
 	 * @param options one of {@link #UPDATE_ALL} or {@link #UPDATE_CHECK_TIMESTAMPS} optionally
-	 *     combined with {@link #UPDATE_EXTERNAL_FILES_FOR_PROJECT}.
+	 *     combined with {@link #UPDATE_EXTERNAL_FILES_FOR_PROJECT} and {@link #UPDATE_CHECK_CONTENTS_HASH}.
 	 * @throws CoreException
 	 * @since 4.0
 	 */
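A client would request the cheaper update by combining the new flag with the timestamp check, as the indexer tasks changed later in this commit do. A hedged usage sketch, assuming the usual CCorePlugin entry point and the update(ICElement[], int) method this javadoc documents:

    import org.eclipse.cdt.core.CCorePlugin;
    import org.eclipse.cdt.core.index.IIndexManager;
    import org.eclipse.cdt.core.model.ICElement;
    import org.eclipse.core.runtime.CoreException;

    // Re-index only translation units whose timestamp AND contents hash changed.
    void updateModified(ICElement[] tuSelection) throws CoreException {
        IIndexManager manager = CCorePlugin.getIndexManager();
        manager.update(tuSelection,
                IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH);
    }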
@@ -7,6 +7,7 @@
  *
  * Contributors:
  *     Markus Schorn - initial API and implementation
+ *     Sergey Prigogin (Google)
  *******************************************************************************/
 package org.eclipse.cdt.core.parser;
 
@@ -22,8 +23,8 @@ import org.eclipse.core.runtime.IPath;
 
 
 /**
- * Abstract class for representing the content of a file. This serves as the
- * input to the preprocessor.
+ * Abstract class for representing the content of a file.
+ * It serves as the input to the preprocessor.
  *
  * @noextend This class is not intended to be subclassed by clients.
  * @since 5.2
@@ -35,6 +36,10 @@ public abstract class FileContent {
 	 */
 	public abstract String getFileLocation();
 
+	/**
+	 * Returns a 64-bit hash value of the file contents.
+	 */
+	public abstract long getContentsHash();
+
 	/**
 	 * Creates a file content object for a fixed buffer.
@@ -78,7 +83,6 @@ public abstract class FileContent {
 		return InternalParserUtil.createWorkspaceFileContent(file);
 	}
 
-
 	/**
 	 * Creates a file content object for a file location that is not part of the workspace
 	 */
@@ -28,6 +28,11 @@ public interface IIndexFragmentFile extends IIndexFile {
 	 */
 	void setTimestamp(long timestamp) throws CoreException;
 
+	/**
+	 * Sets the hash of the file contents.
+	 */
+	void setContentsHash(long hash) throws CoreException;
+
 	/**
 	 * Sets the hash-code of the scanner configuration.
 	 * @param hashcode a hash-code or <code>0</code> if it is unknown.
@@ -7,6 +7,7 @@
  *
  * Contributors:
  *     Markus Schorn - initial API and implementation
+ *     Sergey Prigogin (Google)
  *******************************************************************************/
 package org.eclipse.cdt.internal.core.parser.scanner;
 
@@ -35,6 +36,13 @@ public abstract class AbstractCharArray {
 	 */
 	public abstract boolean isValidOffset(int offset);
 
+	/**
+	 * Computes a 64-bit hash value of the character array. This method does not cause
+	 * any I/O if it is called after the array has been fully traversed.
+	 * @return the hash value of the contents of the array.
+	 */
+	public abstract long getContentsHash();
+
 	/**
 	 * Returns the character at the given position; subclasses do not have to do range checks.
 	 */
@@ -7,16 +7,17 @@
  *
  * Contributors:
  *     Markus Schorn - initial API and implementation
+ *     Sergey Prigogin (Google)
  *******************************************************************************/
 package org.eclipse.cdt.internal.core.parser.scanner;
 
 
 /**
  * Wrapper around char[] to implement {@link AbstractCharArray}.
  */
 public final class CharArray extends AbstractCharArray {
-
 	private final char[] fArray;
+	private long hash64;
 
 	public CharArray(char[] array) {
 		fArray= array;
@@ -48,11 +49,20 @@ public final class CharArray extends AbstractCharArray {
 	@Override
 	public void arraycopy(int offset, char[] destination, int destPos, int length) {
 		System.arraycopy(fArray, offset, destination, destPos, length);
-
 	}
 
 	@Override
 	public boolean isValidOffset(int offset) {
 		return offset < fArray.length;
 	}
+
+	@Override
+	public long getContentsHash() {
+		if (hash64 == 0 && fArray.length != 0) {
+			StreamHasher hasher = new StreamHasher();
+			hasher.addChunk(fArray);
+			hash64 = hasher.computeHash();
+		}
+		return hash64;
+	}
 }
@@ -75,8 +75,6 @@ public class FileCharArray extends LazyCharArray {
 		fCharSet= charSet;
 	}
 
-
-
 	@Override
 	protected Chunk createChunk(int chunkOffset) {
 		FileInputStream fis;
@@ -110,7 +108,7 @@ public class FileCharArray extends LazyCharArray {
 		final CharBuffer dest= CharBuffer.allocate(CHUNK_SIZE);
 
 		boolean endOfInput= false;
-		while(dest.position() < CHUNK_SIZE && !endOfInput) {
+		while (dest.position() < CHUNK_SIZE && !endOfInput) {
 			fChannel.position(fileOffset);
 			in.clear();
 			int count= fChannel.read(in);
@@ -128,7 +126,6 @@ public class FileCharArray extends LazyCharArray {
 		return extractChars(dest);
 	}
 
-
 	@Override
 	protected void rereadChunkData(long fileOffset, long fileEndOffset, char[] dest) {
 		FileInputStream fis;
@@ -156,7 +153,7 @@ public class FileCharArray extends LazyCharArray {
 		final CharsetDecoder decoder = charset.newDecoder().onMalformedInput(CodingErrorAction.REPLACE)
 				.onUnmappableCharacter(CodingErrorAction.REPLACE);
 
-		int needBytes = (int) (fileEndOffset-fileOffset);
+		int needBytes = (int) (fileEndOffset - fileOffset);
 		final ByteBuffer in = ByteBuffer.allocate(needBytes);
 
 		channel.position(fileOffset);
@@ -7,6 +7,7 @@
  *
  * Contributors:
  *     Markus Schorn - initial API and implementation
+ *     Sergey Prigogin (Google)
  *******************************************************************************/
 package org.eclipse.cdt.internal.core.parser.scanner;
 
@@ -115,6 +116,14 @@ public class InternalFileContent extends FileContent {
 		return fFileLocation;
 	}
 
+	/**
+	 * Returns a 64-bit hash value of the file contents.
+	 */
+	@Override
+	public long getContentsHash() {
+		return fSource != null ? fSource.getContentsHash() : 0;
+	}
+
 	/**
 	 * Valid with {@link InclusionKind#USE_SOURCE}.
 	 * @return the codeReader, or <code>null</code> if the kind is different from {@link InclusionKind#USE_SOURCE}.
@@ -7,6 +7,7 @@
  *
  * Contributors:
  *     Markus Schorn - initial API and implementation
+ *     Sergey Prigogin (Google)
  *******************************************************************************/
 package org.eclipse.cdt.internal.core.parser.scanner;
 
@@ -38,8 +39,11 @@ public abstract class LazyCharArray extends AbstractCharArray {
 
 	private int fLength= -1;
 	private List<Chunk> fChunks= new ArrayList<Chunk>();
+	private StreamHasher hasher;
+	private long hash64;
 
 	protected LazyCharArray() {
+		hasher = new StreamHasher();
 	}
 
 	@Override
@@ -66,8 +70,18 @@ public abstract class LazyCharArray extends AbstractCharArray {
 		return true;
 	}
 
+	@Override
+	public long getContentsHash() {
+		if (hasher != null) {
+			readUpTo(Integer.MAX_VALUE);
+			hash64 = hasher.computeHash();
+			hasher = null;
+		}
+		return hash64;
+	}
+
 	private void readUpTo(int offset) {
-		if (fLength >=0)
+		if (fLength >= 0)
 			return;
 
 		final int chunkOffset= offset >> CHUNK_BITS;
@@ -78,13 +92,13 @@ public abstract class LazyCharArray extends AbstractCharArray {
 	public final char get(int offset) {
 		int chunkOffset= offset >> CHUNK_BITS;
 		char[] data= getChunkData(chunkOffset);
-		return data[offset & (CHUNK_SIZE-1)];
+		return data[offset & (CHUNK_SIZE - 1)];
 	}
 
 	@Override
 	public final void arraycopy(int offset, char[] destination, int destinationPos, int length) {
 		int chunkOffset= offset >> CHUNK_BITS;
-		int loffset= offset & (CHUNK_SIZE-1);
+		int loffset= offset & (CHUNK_SIZE - 1);
 		char[] data= getChunkData(chunkOffset);
 		final int canCopy = data.length-loffset;
 		if (length <= canCopy) {
@@ -124,7 +138,7 @@ public abstract class LazyCharArray extends AbstractCharArray {
 	 */
 	protected Chunk createChunk(int chunkOffset) {
 		final int chunkCount = fChunks.size();
-		long fileOffset= chunkCount == 0 ? 0 : fChunks.get(chunkCount-1).fFileEndOffset;
+		long fileOffset= chunkCount == 0 ? 0 : fChunks.get(chunkCount - 1).fFileEndOffset;
 		try {
 			for (int i = chunkCount; i <= chunkOffset; i++) {
 				long[] fileEndOffset= {0};
@@ -134,11 +148,14 @@ public abstract class LazyCharArray extends AbstractCharArray {
 					fLength= fChunks.size() * CHUNK_SIZE;
 					break;
 				}
+				if (hasher != null) {
+					hasher.addChunk(data);
+				}
 				// New chunk
 				Chunk chunk= new Chunk(fileOffset, fileEndOffset[0], data);
 				fChunks.add(chunk);
 				if (charCount < CHUNK_SIZE) {
-					fLength= (fChunks.size()-1) * CHUNK_SIZE + charCount;
+					fLength= (fChunks.size() - 1) * CHUNK_SIZE + charCount;
 					break;
 				}
 				fileOffset= fileEndOffset[0];
@@ -162,8 +179,8 @@ public abstract class LazyCharArray extends AbstractCharArray {
 	}
 
 	/**
-	 * Read the chunk data at the given source offset and provide the end-offset in the
-	 * source.
+	 * Read the chunk data at the given source offset and provide the end-offset in
+	 * the source.
 	 */
 	protected abstract char[] readChunkData(long sourceOffset, long[] sourceEndOffsetHolder) throws Exception;
 
@@ -0,0 +1,236 @@
+/*******************************************************************************
+ * Copyright (c) 2010 Google, Inc and others.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     Sergey Prigogin (Google) - initial API and implementation
+ *
+ * Based on lookup3.c, by Bob Jenkins {@link "http://burtleburtle.net/bob/c/lookup3.c"}
+ *
+ * Here is the original comment by Bob Jenkins:
+ * -------------------------------------------------------------------------------
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions. Routines to test the hash are included
+ * if SELF_TEST is defined. You can use this free for any purpose. It's in
+ * the public domain. It has no warranty.
+ *
+ * You probably want to use hashlittle(). hashlittle() and hashbig()
+ * hash byte arrays. hashlittle() is faster than hashbig() on
+ * little-endian machines. Intel and AMD are little-endian machines.
+ * On second thought, you probably want hashlittle2(), which is identical to
+ * hashlittle() except it returns two 32-bit hashes for the price of one.
+ * You could implement hashbig2() if you wanted but I haven't bothered here.
+ *
+ * If you want to find a hash of, say, exactly 7 integers, do
+ *   a = i1; b = i2; c = i3;
+ *   mix(a, b, c);
+ *   a += i4; b += i5; c += i6;
+ *   mix(a, b, c);
+ *   a += i7;
+ *   finalMix(a, b, c);
+ * then use c as the hash value. If you have a variable length array of
+ * 4-byte integers to hash, use hashword(). If you have a byte array (like
+ * a character string), use hashlittle(). If you have several byte arrays, or
+ * a mix of things, see the comments above hashlittle().
+ *
+ * Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+ * then mix those integers. This is fast (you can do a lot more thorough
+ * mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+ * on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+ *******************************************************************************/
+
+package org.eclipse.cdt.internal.core.parser.scanner;
+
+/**
+ * Computes a 64-bit hash value of a character stream that can be supplied one chunk at a time.
+ * Usage:
+ * <pre>
+ *   StreamHasher hasher = new StreamHasher();
+ *   for (long offset = 0; offset < streamLength; offset += chunkLength) {
+ *     hasher.addChunk(chunkOfCharacters);
+ *   }
+ *   long hashValue = hasher.computeHash();
+ * </pre>
+ *
+ * Based on lookup3.c by Bob Jenkins from {@link "http://burtleburtle.net/bob/c/lookup3.c"}
+ */
+public final class StreamHasher {
+	private static final long SEED = 3141592653589793238L; // PI
+	private static final long EMPTY_STRING_HASH = new StreamHasher().computeHashInternal();
+
+	long hashedOffset; // Current position in the stream of characters.
+	int state; // Current position in the stream of characters modulo 6, or -1 after computeHash is called.
+	int a;
+	int b;
+	int c;
+	char previousCharacter;
+
+	public StreamHasher() {
+		// Set up the internal state.
+		hashedOffset = 0;
+		state = 0;
+		a = b = c = (int) SEED;
+		c += SEED >>> 32;
+	}
+
+	/**
+	 * Adds a chunk of data to the hasher.
+	 * @param chunk Contents of the chunk.
+	 */
+	public void addChunk(char[] chunk) {
+		for (int pos = 0; pos < chunk.length; pos++, hashedOffset++) {
+			char cc = chunk[pos];
+			switch (state++) {
+			case -1:
+				throw new IllegalStateException("addChunk is called after computeHash."); //$NON-NLS-1$
+			case 0:
+			case 2:
+			case 4:
+				previousCharacter = cc;
+				break;
+			case 1:
+				a += previousCharacter | (cc << 16);
+				break;
+			case 3:
+				b += previousCharacter | (cc << 16);
+				break;
+			case 5:
+				c += previousCharacter | (cc << 16);
+				mix();
+				state = 0;
+				break;
+			}
+		}
+	}
+
+	/**
+	 * Computes and returns the hash value. Must be called once after the last chunk.
+	 * @return The hash value of the character stream.
+	 */
+	public long computeHash() {
+		if (state < 0) {
+			throw new IllegalStateException("computeHash method is called more than once."); //$NON-NLS-1$
+		}
+		return computeHashInternal() ^ EMPTY_STRING_HASH;
+	}
+
+	private long computeHashInternal() {
+		switch (state) {
+		case 1:
+			a += previousCharacter;
+			break;
+		case 3:
+			b += previousCharacter;
+			break;
+		case 5:
+			c += previousCharacter;
+			break;
+		}
+		state = -1; // Protect against subsequent calls.
+		finalMix();
+		return (c & 0xFFFFFFFFL) | ((long) b << 32);
+	}
+
+	/**
+	 * Computes a 64-bit hash value of a String. The resulting hash value
+	 * is zero if the string is empty.
+	 *
+	 * @param str The string to hash.
+	 * @return The hash value.
+	 */
+	public static long hash(String str) {
+		StreamHasher hasher = new StreamHasher();
+		hasher.addChunk(str.toCharArray());
+		return hasher.computeHash();
+	}
+
+	/**
+	 * Mixes three 32-bit values reversibly.
+	 *
+	 * This is reversible, so any information in a, b, c before mix() is
+	 * still in a, b, c after mix().
+	 *
+	 * If four pairs of a, b, c inputs are run through mix(), or through
+	 * mix() in reverse, there are at least 32 bits of the output that
+	 * are sometimes the same for one pair and different for another pair.
+	 * This was tested for:
+	 * * pairs that differed by one bit, by two bits, in any combination
+	 *   of top bits of a, b, c, or in any combination of bottom bits of
+	 *   a, b, c.
+	 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+	 *   the output delta to a Gray code (a ^ (a >> 1)) so a string of 1's
+	 *   (as is commonly produced by subtraction) look like a single 1-bit
+	 *   difference.
+	 * * the base values were pseudo-random, all zero but one bit set, or
+	 *   all zero plus a counter that starts at zero.
+	 *
+	 * Some k values for my "a -= c; a ^= Integer.rotateLeft(c, k); c += b;"
+	 * arrangement that satisfy this are
+	 *      4  6  8 16 19  4
+	 *      9 15  3 18 27 15
+	 *     14  9  3  7 17  3
+	 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+	 * for "differ" defined as + with a one-bit base and a two-bit delta.
+	 * I used http://burtleburtle.net/bob/hash/avalanche.html to choose
+	 * the operations, constants, and arrangements of the variables.
+	 *
+	 * This does not achieve avalanche. There are input bits of a, b, c
+	 * that fail to affect some output bits of a, b, c, especially of a.
+	 * The most thoroughly mixed value is c, but it doesn't really even
+	 * achieve avalanche in c.
+	 *
+	 * This allows some parallelism. Read-after-writes are good at doubling
+	 * the number of bits affected, so the goal of mixing pulls in the opposite
+	 * direction as the goal of parallelism. I did what I could. Rotates
+	 * seem to cost as much as shifts on every machine I could lay my hands
+	 * on, and rotates are much kinder to the top and bottom bits, so I used
+	 * rotates.
+	 */
+	private void mix() {
+		a -= c;  a ^= Integer.rotateLeft(c, 4);   c += b;
+		b -= a;  b ^= Integer.rotateLeft(a, 6);   a += c;
+		c -= b;  c ^= Integer.rotateLeft(b, 8);   b += a;
+		a -= c;  a ^= Integer.rotateLeft(c, 16);  c += b;
+		b -= a;  b ^= Integer.rotateLeft(a, 19);  a += c;
+		c -= b;  c ^= Integer.rotateLeft(b, 4);   b += a;
+	}
+
+	/**
+	 * Final mixing of 3 32-bit values a, b, c into c.
+	 *
+	 * Pairs of a, b, c values differing in only a few bits will usually
+	 * produce values of c that look totally different. This was tested for
+	 * * pairs that differed by one bit, by two bits, in any combination
+	 *   of top bits of a, b, c, or in any combination of bottom bits of
+	 *   a, b, c.
+	 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+	 *   the output delta to a Gray code (a ^ (a >> 1)) so a string of 1's (as
+	 *   is commonly produced by subtraction) look like a single 1-bit
+	 *   difference.
+	 * * the base values were pseudo-random, all zero but one bit set, or
+	 *   all zero plus a counter that starts at zero.
+	 *
+	 * These constants passed:
+	 *   14 11 25 16 4 14 24
+	 *   12 14 25 16 4 14 24
+	 * and these came close:
+	 *    4  8 15 26 3 22 24
+	 *   10  8 15 26 3 22 24
+	 *   11  8 15 26 3 22 24
+	 */
+	private void finalMix() {
+		c ^= b;  c -= Integer.rotateLeft(b, 14);
+		a ^= c;  a -= Integer.rotateLeft(c, 11);
+		b ^= a;  b -= Integer.rotateLeft(a, 25);
+		c ^= b;  c -= Integer.rotateLeft(b, 16);
+		a ^= c;  a -= Integer.rotateLeft(c, 4);
+		b ^= a;  b -= Integer.rotateLeft(a, 14);
+		c ^= b;  c -= Integer.rotateLeft(b, 24);
+	}
+}
@@ -50,6 +50,7 @@ import org.eclipse.cdt.internal.core.index.IIndexFragment;
 import org.eclipse.cdt.internal.core.index.IIndexFragmentFile;
 import org.eclipse.cdt.internal.core.index.IWritableIndex;
 import org.eclipse.cdt.internal.core.index.IndexBasedFileContentProvider;
+import org.eclipse.cdt.internal.core.parser.scanner.StreamHasher;
 import org.eclipse.cdt.internal.core.parser.scanner.InternalFileContentProvider;
 import org.eclipse.cdt.internal.core.pdom.dom.PDOMNotImplementedError;
 import org.eclipse.core.runtime.Assert;
@@ -245,9 +246,8 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 	}
 
 
-	private final IASTTranslationUnit createAST(Object tu, AbstractLanguage language, IScannerInfo scanInfo, int options,
-			boolean inContext, IProgressMonitor pm) throws CoreException {
-		final FileContent codeReader= fResolver.getCodeReader(tu);
+	private final IASTTranslationUnit createAST(Object tu, AbstractLanguage language, FileContent codeReader,
+			IScannerInfo scanInfo, int options, boolean inContext, IProgressMonitor pm) throws CoreException {
 		if (codeReader == null) {
 			return null;
 		}
@@ -368,6 +368,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 			IProgressMonitor monitor) throws CoreException {
 		final boolean forceAll= (fUpdateFlags & IIndexManager.UPDATE_ALL) != 0;
 		final boolean checkTimestamps= (fUpdateFlags & IIndexManager.UPDATE_CHECK_TIMESTAMPS) != 0;
+		final boolean checkFileContentsHash = (fUpdateFlags & IIndexManager.UPDATE_CHECK_CONTENTS_HASH) != 0;
 		final boolean checkConfig= (fUpdateFlags & IIndexManager.UPDATE_CHECK_CONFIGURATION) != 0;
 
 		int count= 0;
@@ -401,7 +402,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 				if (checkConfig) {
 					update= isSourceUnit ? isSourceUnitConfigChange(tu, ifile) : isHeaderConfigChange(tu, ifile);
 				}
-				update= update || force || (checkTimestamps && fResolver.getLastModified(ifl) != ifile.getTimestamp());
+				update= update || force || isModified(checkTimestamps, checkFileContentsHash, ifl, tu, ifile);
 				if (update) {
 					requestUpdate(linkageID, ifl, ifile);
 					store(tu, linkageID, isSourceUnit, files);
@@ -423,7 +424,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 				if (checkConfig) {
 					update= isHeaderConfigChange(tu, ifile);
 				}
-				update= update || force || (checkTimestamps && fResolver.getLastModified(ifl) != ifile.getTimestamp());
+				update= update || force || isModified(checkTimestamps, checkFileContentsHash, ifl, tu, ifile);
 				if (update) {
 					final int linkageID = ifile.getLinkageID();
 					requestUpdate(linkageID, ifl, ifile);
@@ -438,6 +439,17 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 		fFilesToUpdate= null;
 	}
 
+	private boolean isModified(boolean checkTimestamps, boolean checkFileContentsHash, IIndexFileLocation ifl,
+			Object tu, IIndexFragmentFile file) throws CoreException {
+		boolean timestampDifferent = checkTimestamps && fResolver.getLastModified(ifl) != file.getTimestamp();
+		if (timestampDifferent) {
+			if (checkFileContentsHash && computeFileContentsHash(tu) == file.getContentsHash()) {
+				return false;
+			}
+		}
+		return timestampDifferent;
+	}
+
 	private void requestUpdate(int linkageID, IIndexFileLocation ifl, IIndexFragmentFile ifile) {
 		FileKey key= new FileKey(linkageID, ifl.getURI());
 		IndexFileContent info= fFileInfos.get(key);
@@ -589,7 +601,8 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 					}
 				}
 			}
-			writeToIndex(linkageID, ast, computeHashCode(scanInfo), monitor);
+			writeToIndex(linkageID, ast, StreamHasher.hash(code), computeHashCode(scanInfo),
+					monitor);
 			updateFileCount(0, 0, 1);
 		}
 	}
@@ -734,10 +747,11 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 			pm.subTask(getMessage(MessageKind.parsingFileTask,
 					path.lastSegment(), path.removeLastSegments(1).toString()));
 			long start= System.currentTimeMillis();
-			IASTTranslationUnit ast= createAST(tu, lang, scanInfo, fASTOptions, inContext, pm);
+			FileContent codeReader= fResolver.getCodeReader(tu);
+			IASTTranslationUnit ast= createAST(tu, lang, codeReader, scanInfo, fASTOptions, inContext, pm);
 			fStatistics.fParsingTime += System.currentTimeMillis() - start;
 			if (ast != null) {
-				writeToIndex(linkageID, ast, computeHashCode(scanInfo), pm);
+				writeToIndex(linkageID, ast, codeReader.getContentsHash(), computeHashCode(scanInfo), pm);
 			}
 		} catch (CoreException e) {
 			th= e;
@@ -755,8 +769,8 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 		}
 	}
 
-	private void writeToIndex(final int linkageID, IASTTranslationUnit ast, int configHash,
-			IProgressMonitor pm) throws CoreException, InterruptedException {
+	private void writeToIndex(final int linkageID, IASTTranslationUnit ast, long fileContentsHash,
+			int configHash, IProgressMonitor pm) throws CoreException, InterruptedException {
 		HashSet<IIndexFileLocation> enteredFiles= new HashSet<IIndexFileLocation>();
 		ArrayList<IIndexFileLocation> orderedIFLs= new ArrayList<IIndexFileLocation>();
 
@@ -775,7 +789,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 
 		IIndexFileLocation[] ifls= orderedIFLs.toArray(new IIndexFileLocation[orderedIFLs.size()]);
 		try {
-			addSymbols(ast, ifls, fIndex, 1, false, configHash, fTodoTaskUpdater, pm);
+			addSymbols(ast, ifls, fIndex, 1, false, fileContentsHash, configHash, fTodoTaskUpdater, pm);
 		} finally {
 			// mark as updated in any case, to avoid parsing files that caused an exception to be thrown.
 			for (IIndexFileLocation ifl : ifls) {
@@ -940,6 +954,11 @@ public abstract class AbstractIndexerTask extends PDOMWriter {
 		return result * 31 + key.hashCode();
 	}
 
+	private long computeFileContentsHash(Object tu) {
+		FileContent codeReader= fResolver.getCodeReader(tu);
+		return codeReader != null ? codeReader.getContentsHash() : 0;
+	}
+
 	public final IndexFileContent getFileContent(int linkageID, IIndexFileLocation ifl) throws CoreException {
 		if (!needToUpdateHeader(linkageID, ifl)) {
 			IndexFileContent info= getFileInfo(linkageID, ifl);
@@ -140,7 +140,8 @@ public class IndexUpdatePolicy {
 		}
 		else if (fIndexer != null) {
 			if (oldPolicy == MANUAL) {
-				task= new PDOMUpdateTask(fIndexer, IIndexManager.UPDATE_CHECK_TIMESTAMPS);
+				task= new PDOMUpdateTask(fIndexer,
+						IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH);
 				clearTUs();
 			}
 			else if (fKind == POST_CHANGE) {
@@ -191,10 +191,11 @@ public class PDOM extends PlatformObject implements IPDOM {
 	 *  94.0 - new model for storing types, bug 294306.
 	 *  95.0 - parameter packs, bug 294730.
 	 *  96.0 - storing pack expansions in the template parameter map, bug 294730.
+	 *  97.0 - storing file contents hash in PDOMFile, bug 302083.
 	 */
-	private static final int MIN_SUPPORTED_VERSION= version(96, 0);
-	private static final int MAX_SUPPORTED_VERSION= version(96, Short.MAX_VALUE);
-	private static final int DEFAULT_VERSION = version(96, 0);
+	private static final int MIN_SUPPORTED_VERSION= version(97, 0);
+	private static final int MAX_SUPPORTED_VERSION= version(97, Short.MAX_VALUE);
+	private static final int DEFAULT_VERSION = version(97, 0);
 
 	private static int version(int major, int minor) {
 		return (major << 16) + minor;
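Since version() packs the major version into the upper 16 bits, raising MIN_SUPPORTED_VERSION to 97.0 invalidates every pre-97 index, so existing PDOMs are rebuilt on first use. Worked out with the formula above:

    int v96 = (96 << 16); // 6291456: any 96.x index
    int v97 = (97 << 16); // 6356992: the new MIN_SUPPORTED_VERSION
    assert v96 < v97;     // every pre-97 PDOM is rejected and re-created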
@@ -571,7 +571,8 @@ public class PDOMManager implements IWritableIndexManager, IListener {
 					pdom.releaseReadLock();
 				}
 				if (resume) {
-					enqueue(new PDOMUpdateTask(indexer, IIndexManager.UPDATE_CHECK_TIMESTAMPS));
+					enqueue(new PDOMUpdateTask(indexer,
+							IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH));
 				}
 			}
 			return;
@@ -592,7 +593,8 @@ public class PDOMManager implements IWritableIndexManager, IListener {
 
 		IPDOMIndexerTask task= null;
 		if (operation.wasSuccessful()) {
-			task= new PDOMUpdateTask(indexer, IIndexManager.UPDATE_CHECK_TIMESTAMPS);
+			task= new PDOMUpdateTask(indexer,
+					IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH);
 		}
 		else {
 			task= new PDOMRebuildTask(indexer);
@@ -138,15 +138,15 @@ abstract public class PDOMWriter {
 	}
 
 	/**
-	 * Extracts symbols from the given ast and adds them to the index.
+	 * Extracts symbols from the given AST and adds them to the index.
 	 *
 	 * When flushIndex is set to <code>false</code>, you must make sure to flush the
 	 * index after your last write operation.
 	 * @since 4.0
 	 */
 	public void addSymbols(IASTTranslationUnit ast, IIndexFileLocation[] ifls, IWritableIndex index,
-			int readlockCount, boolean flushIndex, int configHash, ITodoTaskUpdater taskUpdater,
-			IProgressMonitor pm) throws InterruptedException, CoreException {
+			int readlockCount, boolean flushIndex, long fileContentsHash, int configHash,
+			ITodoTaskUpdater taskUpdater, IProgressMonitor pm) throws InterruptedException, CoreException {
 		if (fShowProblems) {
 			fShowInclusionProblems= true;
 			fShowScannerProblems= true;
@@ -165,8 +165,8 @@ abstract public class PDOMWriter {
 			resolveNames(symbolMap, ifls, stati, pm);
 
 			// index update
-			storeSymbolsInIndex(symbolMap, ifls, ast.getLinkage().getLinkageID(), configHash, contextIncludes,
-					index, readlockCount, flushIndex, stati, pm);
+			storeSymbolsInIndex(symbolMap, ifls, ast.getLinkage().getLinkageID(), fileContentsHash,
+					configHash, contextIncludes, index, readlockCount, flushIndex, stati, pm);
 
 			if (taskUpdater != null) {
 				taskUpdater.updateTasks(ast.getComments(), ifls);
@@ -193,9 +193,10 @@ abstract public class PDOMWriter {
 	}
 
 	private void storeSymbolsInIndex(final Map<IIndexFileLocation, Symbols> symbolMap, IIndexFileLocation[] ifls,
-			int linkageID, int configHash, HashSet<IASTPreprocessorIncludeStatement> contextIncludes,
-			IWritableIndex index, int readlockCount, boolean flushIndex,
-			ArrayList<IStatus> stati, IProgressMonitor pm) throws InterruptedException, CoreException {
+			int linkageID, long fileContentsHash, int configHash,
+			HashSet<IASTPreprocessorIncludeStatement> contextIncludes, IWritableIndex index, int readlockCount,
+			boolean flushIndex, ArrayList<IStatus> stati, IProgressMonitor pm)
+			throws InterruptedException, CoreException {
 		for (int i= 0; i < ifls.length; i++) {
 			if (pm.isCanceled())
 				return;
@@ -209,7 +210,8 @@ abstract public class PDOMWriter {
 			YieldableIndexLock lock = new YieldableIndexLock(index, readlockCount, flushIndex);
 			lock.acquire();
 			try {
-				storeFileInIndex(index, ifl, symbolMap, linkageID, configHash, contextIncludes, lock);
+				storeFileInIndex(index, ifl, symbolMap, linkageID, fileContentsHash, configHash,
+						contextIncludes, lock);
 			} catch (RuntimeException e) {
 				th= e;
 			} catch (PDOMNotImplementedError e) {
@@ -457,9 +459,9 @@ abstract public class PDOMWriter {
 	}
 
 	private IIndexFragmentFile storeFileInIndex(IWritableIndex index, IIndexFileLocation location,
-			Map<IIndexFileLocation, Symbols> symbolMap, int linkageID, int configHash,
-			Set<IASTPreprocessorIncludeStatement> contextIncludes, YieldableIndexLock lock)
-			throws CoreException, InterruptedException {
+			Map<IIndexFileLocation, Symbols> symbolMap, int linkageID, long fileContentsHash,
+			int configHash, Set<IASTPreprocessorIncludeStatement> contextIncludes,
+			YieldableIndexLock lock) throws CoreException, InterruptedException {
 		Set<IIndexFileLocation> clearedContexts= Collections.emptySet();
 		IIndexFragmentFile file;
 		long timestamp = fResolver.getLastModified(location);
@@ -518,6 +520,7 @@ abstract public class PDOMWriter {
 			}
 			if (SEMI_TRANSACTIONAL_UPDATES) {
 				file.setTimestamp(timestamp);
+				file.setContentsHash(fileContentsHash);
 				file = index.commitUncommittedFile();
 			}
 		} finally {
@@ -71,11 +71,12 @@ public class PDOMFile implements IIndexFragmentFile {
 	private static final int LOCATION_REPRESENTATION = 16;
 	private static final int LINKAGE_ID= 20;
 	private static final int TIME_STAMP = 24;
-	private static final int SCANNER_CONFIG_HASH= 32;
-	private static final int LAST_USING_DIRECTIVE= 36;
-	private static final int FIRST_MACRO_REFERENCE= 40;
+	private static final int CONTENT_HASH= 32;
+	private static final int SCANNER_CONFIG_HASH= 40;
+	private static final int LAST_USING_DIRECTIVE= 44;
+	private static final int FIRST_MACRO_REFERENCE= 48;
 
-	private static final int RECORD_SIZE= 44;
+	private static final int RECORD_SIZE= 52;
 
 	public static class Comparator implements IBTreeComparator {
 		private Database db;
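The new 8-byte hash slots in right after TIME_STAMP, which is why every later offset shifts by 8 and the record grows from 44 to 52 bytes. A layout sketch derived from the constants above:

    // PDOMFile record layout after this change (byte offsets):
    //   24..31  TIME_STAMP            (long)
    //   32..39  CONTENT_HASH          (long)  <- new field
    //   40..43  SCANNER_CONFIG_HASH   (int)
    //   44..47  LAST_USING_DIRECTIVE  (int)
    //   48..51  FIRST_MACRO_REFERENCE (int)
    //   RECORD_SIZE = 52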
@@ -223,6 +224,7 @@ public class PDOMFile implements IIndexFragmentFile {
 		}
 
 		setTimestamp(sourceFile.getTimestamp());
+		setContentsHash(sourceFile.getContentsHash());
 		setScannerConfigurationHashcode(sourceFile.getScannerConfigurationHashcode());
 
 		sourceFile.delete();
@@ -271,6 +273,16 @@ public class PDOMFile implements IIndexFragmentFile {
 		db.putLong(record + TIME_STAMP, timestamp);
 	}
 
+	public long getContentsHash() throws CoreException {
+		Database db = fLinkage.getDB();
+		return db.getLong(record + CONTENT_HASH);
+	}
+
+	public void setContentsHash(long hash) throws CoreException {
+		Database db= fLinkage.getDB();
+		db.putLong(record + CONTENT_HASH, hash);
+	}
+
 	public int getScannerConfigurationHashcode() throws CoreException {
 		Database db = fLinkage.getDB();
 		return db.getInt(record + SCANNER_CONFIG_HASH);
@@ -99,7 +99,7 @@ public abstract class PDOMIndexerTask extends AbstractIndexerTask implements IPD
 			setIndexFilesWithoutBuildConfiguration(false);
 			setIndexHeadersWithoutContext(UnusedHeaderStrategy.skip);
 		}
-		setUpdateFlags(IIndexManager.UPDATE_CHECK_TIMESTAMPS);
+		setUpdateFlags(IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH);
 		setForceFirstFiles(forceFiles.length);
 	}
 
@@ -16,6 +16,7 @@ public class UpdateIndexWithModifiedFilesAction extends AbstractUpdateIndexActio
 
 	@Override
 	protected int getUpdateOptions() {
-		return IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONFIGURATION | IIndexManager.UPDATE_EXTERNAL_FILES_FOR_PROJECT;
+		return IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONFIGURATION |
+				IIndexManager.UPDATE_EXTERNAL_FILES_FOR_PROJECT | IIndexManager.UPDATE_CHECK_CONTENTS_HASH;
 	}
 }