diff --git a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/ScannerTestSuite.java b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/ScannerTestSuite.java index 05de0774722..fa7128bc998 100644 --- a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/ScannerTestSuite.java +++ b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/ScannerTestSuite.java @@ -25,6 +25,7 @@ public class ScannerTestSuite extends TestSuite { suite.addTest(PreprocessorBugsTests.suite()); suite.addTest(ExpansionExplorerTests.suite()); suite.addTest(InactiveCodeTests.suite()); + suite.addTest(StreamHasherTests.suite()); return suite; } } diff --git a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/StreamHasherTests.java b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/StreamHasherTests.java new file mode 100644 index 00000000000..5aa287a7edb --- /dev/null +++ b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/core/parser/tests/scanner/StreamHasherTests.java @@ -0,0 +1,90 @@ +/******************************************************************************* + * Copyright (c) 2010 Google, Inc and others. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Sergey Prigogin (Google) - initial API and implementation + *******************************************************************************/ +package org.eclipse.cdt.core.parser.tests.scanner; + +import junit.framework.TestSuite; + +import org.eclipse.cdt.core.testplugin.util.BaseTestCase; +import org.eclipse.cdt.internal.core.parser.scanner.StreamHasher; + +/** + * Unit test for StreamHasher class. + */ +public class StreamHasherTests extends BaseTestCase { + + private static final String TEXT = + "'Twas brillig, and the slithy toves\r\n" + + "Did gyre and gimble in the wabe;\r\n" + + "All mimsy were the borogoves,\r\n" + + "And the mome raths outgrabe.\r\n" + + "\r\n" + + "\"Beware the Jabberwock, my son!\r\n" + + "The jaws that bite, the claws that catch!\r\n" + + "Beware the Jubjub bird, and shun\r\n" + + "The frumious Bandersnatch!\"\r\n" + + "\r\n" + + "He took his vorpal sword in hand:\r\n" + + "Long time the manxome foe he sought—\r\n" + + "So rested he by the Tumtum tree,\r\n" + + "And stood awhile in thought.\r\n" + + "\r\n" + + "And as in uffish thought he stood,\r\n" + + "The Jabberwock, with eyes of flame,\r\n" + + "Came whiffling through the tulgey wood,\r\n" + + "And burbled as it came!\r\n" + + "\r\n" + + "One, two! One, two! and through and through\r\n" + + "The vorpal blade went snicker-snack!\r\n" + + "He left it dead, and with its head\r\n" + + "He went galumphing back.\r\n" + + "\r\n" + + "\"And hast thou slain the Jabberwock?\r\n" + + "Come to my arms, my beamish boy!\r\n" + + "O frabjous day! Callooh! 
Callay!\"\r\n" + + "He chortled in his joy.\r\n" + + "\r\n" + + "'Twas brillig, and the slithy toves\r\n" + + "Did gyre and gimble in the wabe;\r\n" + + "All mimsy were the borogoves,\r\n" + + "And the mome raths outgrabe.\r\n"; + + public static TestSuite suite() { + return suite(StreamHasherTests.class); + } + + public StreamHasherTests() { + super(); + } + + public StreamHasherTests(String name) { + super(name); + } + + public void testEmpty() throws Exception { + // Verify that an empty string has a zero hash value. + assertEquals(0, StreamHasher.hash("")); + assertEquals(0, new StreamHasher().computeHash()); + } + + public void testChunks() throws Exception { + // Verify that the hash value does not depend on partitioning of the character string into chunks. + long h = StreamHasher.hash(TEXT); + assertTrue(h != 0); + for (int chunkSize = 1; chunkSize <= 20; chunkSize++) { + StreamHasher hasher = new StreamHasher(); + for (int offset = 0; offset < TEXT.length(); offset += chunkSize) { + char[] chunk = TEXT.substring(offset, Math.min(offset + chunkSize, TEXT.length())).toCharArray(); + hasher.addChunk(chunk); + } + assertEquals(h, hasher.computeHash()); + } + } +} diff --git a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexListenerTest.java b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexListenerTest.java index c5c85f2079c..028bba6d9f8 100644 --- a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexListenerTest.java +++ b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexListenerTest.java @@ -91,7 +91,6 @@ public class IndexListenerTest extends BaseTestCase { } } - public void testChangeListener() throws Exception { final Object mutex= new Object(); final List projects= new ArrayList(); @@ -119,8 +118,8 @@ public class IndexListenerTest extends BaseTestCase { projects.clear(); - IFile file1= TestSourceReader.createFile(fProject1.getProject(), "test.cpp", "int a;"); - IFile file2= TestSourceReader.createFile(fProject2.getProject(), "test.cpp", "int b;"); + IFile file1= TestSourceReader.createFile(fProject1.getProject(), "test.cpp", "int b;"); + IFile file2= TestSourceReader.createFile(fProject2.getProject(), "test.cpp", "int c;"); synchronized (mutex) { mutex.wait(1000); if (projects.size() < 2) { diff --git a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexUpdateTests.java b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexUpdateTests.java index a0750153cc3..e1c033c3d81 100644 --- a/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexUpdateTests.java +++ b/core/org.eclipse.cdt.core.tests/parser/org/eclipse/cdt/internal/index/tests/IndexUpdateTests.java @@ -125,7 +125,10 @@ public class IndexUpdateTests extends IndexTestBase { } private void updateFile() throws Exception { - fFile= TestSourceReader.createFile(fFile.getParent(), fFile.getName(), fContents[++fContentUsed].toString()); + // Append variable comment to the end of the file to change its contents. + // Indexer would not reindex the file if its contents remain the same. 
+ fFile= TestSourceReader.createFile(fFile.getParent(), fFile.getName(), + fContents[++fContentUsed].toString() + "\n// " + fContentUsed); TestSourceReader.waitUntilFileIsIndexed(fIndex, fFile, INDEXER_WAIT_TIME); } diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexFile.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexFile.java index 95a532f5e5a..23171e84735 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexFile.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexFile.java @@ -60,6 +60,14 @@ public interface IIndexFile { */ long getTimestamp() throws CoreException; + /** + * Hash of the file contents when the file was indexed. + * @return 64-bit hash of the file content. + * @throws CoreException + * @since 5.2 + */ + long getContentsHash() throws CoreException; + /** * Returns the hash-code of the scanner configuration that was used to parse the file. * 0 will be returned in case the hash-code is unknown. diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexManager.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexManager.java index 67fb6a4139d..c064e8d2966 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexManager.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/index/IIndexManager.java @@ -79,6 +79,15 @@ public interface IIndexManager extends IPDOMManager { */ public final static int UPDATE_EXTERNAL_FILES_FOR_PROJECT= 0x8; + /** + * This flag modifies behavior of UPDATE_CHECK_TIMESTAMPS. Both, the timestamp and the hash + * of the contents of a translation unit, have to change in order to trigger re-indexing. + * Checking for content changes may reduce indexing overhead for projects that use code + * generation since generated files are sometimes recreated with identical contents. + * @since 5.2 + */ + public final static int UPDATE_CHECK_CONTENTS_HASH= 0x10; + /** * Returns the index for the given project. * @param project the project to get the index for @@ -192,7 +201,7 @@ public interface IIndexManager extends IPDOMManager { * nested translation units are considered. * @param tuSelection the translation units to update. * @param options one of {@link #UPDATE_ALL} or {@link #UPDATE_CHECK_TIMESTAMPS} optionally - * combined with {@link #UPDATE_EXTERNAL_FILES_FOR_PROJECT}. + * combined with {@link #UPDATE_EXTERNAL_FILES_FOR_PROJECT} and {@link #UPDATE_CHECK_CONTENTS_HASH}. * @throws CoreException * @since 4.0 */ diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/parser/FileContent.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/parser/FileContent.java index 584c54d49de..fc66524d4dd 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/parser/FileContent.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/core/parser/FileContent.java @@ -7,6 +7,7 @@ * * Contributors: * Markus Schorn - initial API and implementation + * Sergey Prigogin (Google) *******************************************************************************/ package org.eclipse.cdt.core.parser; @@ -22,8 +23,8 @@ import org.eclipse.core.runtime.IPath; /** - * Abstract class for representing the content of a file. This serves as the - * input to the preprocessor. + * Abstract class for representing the content of a file. + * It serves as the input to the preprocessor. * * @noextend This class is not intended to be subclassed by clients. 
* @since 5.2 @@ -35,6 +36,10 @@ public abstract class FileContent { */ public abstract String getFileLocation(); + /** + * Returns a 64-bit hash value of the file contents. + */ + public abstract long getContentsHash(); /** * Creates a file content object for a fixed buffer. @@ -78,7 +83,6 @@ public abstract class FileContent { return InternalParserUtil.createWorkspaceFileContent(file); } - /** * Creates a file content object for a file location that is not part of the workspace */ diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/index/IIndexFragmentFile.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/index/IIndexFragmentFile.java index b6979bd13a3..a47d28cd555 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/index/IIndexFragmentFile.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/index/IIndexFragmentFile.java @@ -28,6 +28,11 @@ public interface IIndexFragmentFile extends IIndexFile { */ void setTimestamp(long timestamp) throws CoreException; + /** + * Sets the hash of the file content. + */ + void setContentsHash(long hash) throws CoreException; + /** * Sets the hash-code of the scanner configuration. * @param hashcode a hash-code or 0 if it is unknown. diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/AbstractCharArray.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/AbstractCharArray.java index 189de488220..d5bc0fc5e9a 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/AbstractCharArray.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/AbstractCharArray.java @@ -7,6 +7,7 @@ * * Contributors: * Markus Schorn - initial API and implementation + * Sergey Prigogin (Google) *******************************************************************************/ package org.eclipse.cdt.internal.core.parser.scanner; @@ -35,6 +36,13 @@ public abstract class AbstractCharArray { */ public abstract boolean isValidOffset(int offset); + /** + * Computes 64-bit hash value of the character array. This method doesn't cause any I/O if called + * after the array has been traversed. + * @return The hash value of the contents of the array. + */ + public abstract long getContentsHash(); + /** * Returns the character at the given position, subclasses do not have to do range checks. */ diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/CharArray.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/CharArray.java index 3b537744abd..329be98e79f 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/CharArray.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/CharArray.java @@ -7,16 +7,17 @@ * * Contributors: * Markus Schorn - initial API and implementation + * Sergey Prigogin (Google) *******************************************************************************/ package org.eclipse.cdt.internal.core.parser.scanner; - /** * Wrapper around char[] to implement {@link AbstractCharArray}. 
*/ public final class CharArray extends AbstractCharArray { private final char[] fArray; + private long hash64; public CharArray(char[] array) { fArray= array; @@ -48,11 +49,20 @@ public final class CharArray extends AbstractCharArray { @Override public void arraycopy(int offset, char[] destination, int destPos, int length) { System.arraycopy(fArray, offset, destination, destPos, length); - } @Override public boolean isValidOffset(int offset) { return offset < fArray.length; } + + @Override + public long getContentsHash() { + if (hash64 == 0 && fArray.length != 0) { + StreamHasher hasher = new StreamHasher(); + hasher.addChunk(fArray); + hash64 = hasher.computeHash(); + } + return hash64; + } } diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/FileCharArray.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/FileCharArray.java index 65261db4abe..7feb42650c5 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/FileCharArray.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/FileCharArray.java @@ -75,8 +75,6 @@ public class FileCharArray extends LazyCharArray { fCharSet= charSet; } - - @Override protected Chunk createChunk(int chunkOffset) { FileInputStream fis; @@ -110,7 +108,7 @@ public class FileCharArray extends LazyCharArray { final CharBuffer dest= CharBuffer.allocate(CHUNK_SIZE); boolean endOfInput= false; - while(dest.position() < CHUNK_SIZE && !endOfInput) { + while (dest.position() < CHUNK_SIZE && !endOfInput) { fChannel.position(fileOffset); in.clear(); int count= fChannel.read(in); @@ -127,8 +125,7 @@ public class FileCharArray extends LazyCharArray { dest.flip(); return extractChars(dest); } - - + @Override protected void rereadChunkData(long fileOffset, long fileEndOffset, char[] dest) { FileInputStream fis; @@ -156,7 +153,7 @@ public class FileCharArray extends LazyCharArray { final CharsetDecoder decoder = charset.newDecoder().onMalformedInput(CodingErrorAction.REPLACE) .onUnmappableCharacter(CodingErrorAction.REPLACE); - int needBytes = (int) (fileEndOffset-fileOffset); + int needBytes = (int) (fileEndOffset - fileOffset); final ByteBuffer in = ByteBuffer.allocate(needBytes); channel.position(fileOffset); diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/InternalFileContent.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/InternalFileContent.java index 69b29cd54c4..428dde74fbe 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/InternalFileContent.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/InternalFileContent.java @@ -7,6 +7,7 @@ * * Contributors: * Markus Schorn - initial API and implementation + * Sergey Prigogin (Google) *******************************************************************************/ package org.eclipse.cdt.internal.core.parser.scanner; @@ -115,6 +116,14 @@ public class InternalFileContent extends FileContent { return fFileLocation; } + /** + * Returns a 64-bit hash value of the file contents. + */ + @Override + public long getContentsHash() { + return fSource != null ? fSource.getContentsHash() : 0; + } + /** * Valid with {@link InclusionKind#USE_SOURCE}. * @return the codeReader or null if kind is different to {@link InclusionKind#USE_SOURCE}. 
diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/LazyCharArray.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/LazyCharArray.java index 5b82731a3f0..bd98af308dd 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/LazyCharArray.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/LazyCharArray.java @@ -7,6 +7,7 @@ * * Contributors: * Markus Schorn - initial API and implementation + * Sergey Prigogin (Google) *******************************************************************************/ package org.eclipse.cdt.internal.core.parser.scanner; @@ -38,8 +39,11 @@ public abstract class LazyCharArray extends AbstractCharArray { private int fLength= -1; private List fChunks= new ArrayList(); + private StreamHasher hasher; + private long hash64; protected LazyCharArray() { + hasher = new StreamHasher(); } @Override @@ -66,8 +70,18 @@ public abstract class LazyCharArray extends AbstractCharArray { return true; } + @Override + public long getContentsHash() { + if (hasher != null) { + readUpTo(Integer.MAX_VALUE); + hash64 = hasher.computeHash(); + hasher = null; + } + return hash64; + } + private void readUpTo(int offset) { - if (fLength >=0) + if (fLength >= 0) return; final int chunkOffset= offset >> CHUNK_BITS; @@ -78,13 +92,13 @@ public abstract class LazyCharArray extends AbstractCharArray { public final char get(int offset) { int chunkOffset= offset >> CHUNK_BITS; char[] data= getChunkData(chunkOffset); - return data[offset & (CHUNK_SIZE-1)]; + return data[offset & (CHUNK_SIZE - 1)]; } @Override public final void arraycopy(int offset, char[] destination, int destinationPos, int length) { int chunkOffset= offset >> CHUNK_BITS; - int loffset= offset & (CHUNK_SIZE-1); + int loffset= offset & (CHUNK_SIZE - 1); char[] data= getChunkData(chunkOffset); final int canCopy = data.length-loffset; if (length <= canCopy) { @@ -124,7 +138,7 @@ public abstract class LazyCharArray extends AbstractCharArray { */ protected Chunk createChunk(int chunkOffset) { final int chunkCount = fChunks.size(); - long fileOffset= chunkCount == 0 ? 0 : fChunks.get(chunkCount-1).fFileEndOffset; + long fileOffset= chunkCount == 0 ? 0 : fChunks.get(chunkCount - 1).fFileEndOffset; try { for (int i = chunkCount; i <= chunkOffset; i++) { long[] fileEndOffset= {0}; @@ -133,12 +147,15 @@ public abstract class LazyCharArray extends AbstractCharArray { if (charCount == 0) { fLength= fChunks.size() * CHUNK_SIZE; break; - } + } + if (hasher != null) { + hasher.addChunk(data); + } // New chunk Chunk chunk= new Chunk(fileOffset, fileEndOffset[0], data); fChunks.add(chunk); if (charCount < CHUNK_SIZE) { - fLength= (fChunks.size()-1) * CHUNK_SIZE + charCount; + fLength= (fChunks.size() - 1) * CHUNK_SIZE + charCount; break; } fileOffset= fileEndOffset[0]; @@ -162,8 +179,8 @@ public abstract class LazyCharArray extends AbstractCharArray { } /** - * Read the chunk data at the given source offset and provide the end-offset in the - * source. + * Read the chunk data at the given source offset and provide the end-offset in + * the source. 
*/ protected abstract char[] readChunkData(long sourceOffset, long[] sourceEndOffsetHolder) throws Exception; diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/StreamHasher.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/StreamHasher.java new file mode 100644 index 00000000000..5cbf46fae79 --- /dev/null +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/parser/scanner/StreamHasher.java @@ -0,0 +1,236 @@ +/******************************************************************************* + * Copyright (c) 2010 Google, Inc and others. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Sergey Prigogin (Google) - initial API and implementation + * + * Based on lookup3.c, by Bob Jenkins {@link "http://burtleburtle.net/bob/c/lookup3.c"} + * + * Here is the original comment by Bob Jenkins: + * ------------------------------------------------------------------------------- + * lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * + * These are functions for producing 32-bit hashes for hash table lookup. + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() + * are externally useful functions. Routines to test the hash are included + * if SELF_TEST is defined. You can use this free for any purpose. It's in + * the public domain. It has no warranty. + * + * You probably want to use hashlittle(). hashlittle() and hashbig() + * hash byte arrays. hashlittle() is is faster than hashbig() on + * little-endian machines. Intel and AMD are little-endian machines. + * On second thought, you probably want hashlittle2(), which is identical to + * hashlittle() except it returns two 32-bit hashes for the price of one. + * You could implement hashbig2() if you wanted but I haven't bothered here. + * + * If you want to find a hash of, say, exactly 7 integers, do + * a = i1; b = i2; c = i3; + * mix(a, b, c); + * a += i4; b += i5; c += i6; + * mix(a, b, c); + * a += i7; + * finalMix(a, b, c); + * then use c as the hash value. If you have a variable length array of + * 4-byte integers to hash, use hashword(). If you have a byte array (like + * a character string), use hashlittle(). If you have several byte arrays, or + * a mix of things, see the comments above hashlittle(). + * + * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, + * then mix those integers. This is fast (you can do a lot more thorough + * mixing with 12*3 instructions on 3 integers than you can with 3 instructions + * on 1 byte), but shoehorning those bytes into integers efficiently is messy. + *******************************************************************************/ + +package org.eclipse.cdt.internal.core.parser.scanner; + +/** + * Computes a 64-bit hash value of a character stream that can be supplied one chunk at a time. + * Usage: + *
+ *   StreamHasher hasher = new StreamHasher();
+ *   for (long offset = 0; offset < streamLength; offset += chunkLength) {
+ *     hasher.addChunk(chunkOfCharacters);
+ *   }
+ *   long hashValue = hasher.computeHash();
+ * </pre>
+ * + * Based on lookup3.c by Bob Jenkins from {@link "http://burtleburtle.net/bob/c/lookup3.c"} + */ +public final class StreamHasher { + private static final long SEED = 3141592653589793238L; // PI + private static final long EMPTY_STRING_HASH = new StreamHasher().computeHashInternal(); + + long hashedOffset; // Current position in the stream of characters. + int state; // Current position in the stream of characters modulo 6, or -1 after computeHash is called. + int a; + int b; + int c; + char previousCharacter; + + public StreamHasher() { + // Set up the internal state. + hashedOffset = 0; + state = 0; + a = b = c = (int) SEED; + c += SEED >>> 32; + } + + /** + * Adds a chunk of data to the hasher. + * @param chunk Contents of the chunk. + */ + public void addChunk(char[] chunk) { + for (int pos = 0; pos < chunk.length; pos++, hashedOffset++) { + char cc = chunk[pos]; + switch (state++) { + case -1: + throw new IllegalStateException("addChunk is called after computeHash."); //$NON-NLS-1$ + case 0: + case 2: + case 4: + previousCharacter = cc; + break; + case 1: + a += previousCharacter | (cc << 16); + break; + case 3: + b += previousCharacter | (cc << 16); + break; + case 5: + c += previousCharacter | (cc << 16); + mix(); + state = 0; + break; + } + } + } + + /** + * Computes and returns the hash value. Must be called once after the last chunk. + * @return The hash value of the character stream. + */ + public long computeHash() { + if (state < 0) { + throw new IllegalStateException("computeHash method is called more than once."); //$NON-NLS-1$ + } + return computeHashInternal() ^ EMPTY_STRING_HASH; + } + + private long computeHashInternal() { + switch (state) { + case 1: + a += previousCharacter; + break; + case 3: + b += previousCharacter; + break; + case 5: + c += previousCharacter; + break; + } + state = -1; // Protect against subsequent calls. + finalMix(); + return (c & 0xFFFFFFFFL) | ((long) b << 32); + } + + /** + * Computes a 64-bit hash value of a String. The resulting hash value + * is zero if the string is empty. + * + * @param str The string to hash. + * @return The hash value. + */ + public static long hash(String str) { + StreamHasher hasher = new StreamHasher(); + hasher.addChunk(str.toCharArray()); + return hasher.computeHash(); + } + + /** + * Mixes three 32-bit values reversibly. + * + * This is reversible, so any information in a, b, c before mix() is + * still in a, b, c after mix(). + * + * If four pairs of a, b, c inputs are run through mix(), or through + * mix() in reverse, there are at least 32 bits of the output that + * are sometimes the same for one pair and different for another pair. + * This was tested for: + * * pairs that differed by one bit, by two bits, in any combination + * of top bits of a, b, c, or in any combination of bottom bits of + * a, b, c. + * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + * the output delta to a Gray code (a ^ (a >> 1)) so a string of 1's + * (as is commonly produced by subtraction) look like a single 1-bit + * difference. + * * the base values were pseudo-random, all zero but one bit set, or + * all zero plus a counter that starts at zero. + * + * Some k values for my "a -= c; a ^= Integer.rotateLeft(c, k); c += b;" + * arrangement that satisfy this are + * 4 6 8 16 19 4 + * 9 15 3 18 27 15 + * 14 9 3 7 17 3 + * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing + * for "differ" defined as + with a one-bit base and a two-bit delta. 
+ * I used http://burtleburtle.net/bob/hash/avalanche.html to choose + * the operations, constants, and arrangements of the variables. + * + * This does not achieve avalanche. There are input bits of a, b, c + * that fail to affect some output bits of a, b, c, especially of a. + * The most thoroughly mixed value is c, but it doesn't really even + * achieve avalanche in c. + * + * This allows some parallelism. Read-after-writes are good at doubling + * the number of bits affected, so the goal of mixing pulls in the opposite + * direction as the goal of parallelism. I did what I could. Rotates + * seem to cost as much as shifts on every machine I could lay my hands + * on, and rotates are much kinder to the top and bottom bits, so I used + * rotates. + */ + private void mix() { + a -= c; a ^= Integer.rotateLeft(c, 4); c += b; + b -= a; b ^= Integer.rotateLeft(a, 6); a += c; + c -= b; c ^= Integer.rotateLeft(b, 8); b += a; + a -= c; a ^= Integer.rotateLeft(c, 16); c += b; + b -= a; b ^= Integer.rotateLeft(a, 19); a += c; + c -= b; c ^= Integer.rotateLeft(b, 4); b += a; + } + + /** + * Final mixing of 3 32-bit values a, b, c into c + * + * Pairs of a, b, c values differing in only a few bits will usually + * produce values of c that look totally different. This was tested for + * * pairs that differed by one bit, by two bits, in any combination + * of top bits of a, b, c, or in any combination of bottom bits of + * a, b, c. + * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + * the output delta to a Gray code (a ^ (a >> 1)) so a string of 1's (as + * is commonly produced by subtraction) look like a single 1-bit + * difference. + * * the base values were pseudo-random, all zero but one bit set, or + * all zero plus a counter that starts at zero. 
+ * + * These constants passed: + * 14 11 25 16 4 14 24 + * 12 14 25 16 4 14 24 + * and these came close: + * 4 8 15 26 3 22 24 + * 10 8 15 26 3 22 24 + * 11 8 15 26 3 22 24 + */ + private void finalMix() { + c ^= b; c -= Integer.rotateLeft(b, 14); + a ^= c; a -= Integer.rotateLeft(c, 11); + b ^= a; b -= Integer.rotateLeft(a, 25); + c ^= b; c -= Integer.rotateLeft(b, 16); + a ^= c; a -= Integer.rotateLeft(c, 4); + b ^= a; b -= Integer.rotateLeft(a, 14); + c ^= b; c -= Integer.rotateLeft(b, 24); + } +} diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/AbstractIndexerTask.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/AbstractIndexerTask.java index e68d36f4ee9..28ed9e397b8 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/AbstractIndexerTask.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/AbstractIndexerTask.java @@ -50,6 +50,7 @@ import org.eclipse.cdt.internal.core.index.IIndexFragment; import org.eclipse.cdt.internal.core.index.IIndexFragmentFile; import org.eclipse.cdt.internal.core.index.IWritableIndex; import org.eclipse.cdt.internal.core.index.IndexBasedFileContentProvider; +import org.eclipse.cdt.internal.core.parser.scanner.StreamHasher; import org.eclipse.cdt.internal.core.parser.scanner.InternalFileContentProvider; import org.eclipse.cdt.internal.core.pdom.dom.PDOMNotImplementedError; import org.eclipse.core.runtime.Assert; @@ -245,9 +246,8 @@ public abstract class AbstractIndexerTask extends PDOMWriter { } - private final IASTTranslationUnit createAST(Object tu, AbstractLanguage language, IScannerInfo scanInfo, int options, - boolean inContext, IProgressMonitor pm) throws CoreException { - final FileContent codeReader= fResolver.getCodeReader(tu); + private final IASTTranslationUnit createAST(Object tu, AbstractLanguage language, FileContent codeReader, + IScannerInfo scanInfo, int options, boolean inContext, IProgressMonitor pm) throws CoreException { if (codeReader == null) { return null; } @@ -368,6 +368,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter { IProgressMonitor monitor) throws CoreException { final boolean forceAll= (fUpdateFlags & IIndexManager.UPDATE_ALL) != 0; final boolean checkTimestamps= (fUpdateFlags & IIndexManager.UPDATE_CHECK_TIMESTAMPS) != 0; + final boolean checkFileContentsHash = (fUpdateFlags & IIndexManager.UPDATE_CHECK_CONTENTS_HASH) != 0; final boolean checkConfig= (fUpdateFlags & IIndexManager.UPDATE_CHECK_CONFIGURATION) != 0; int count= 0; @@ -401,7 +402,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter { if (checkConfig) { update= isSourceUnit ? 
isSourceUnitConfigChange(tu, ifile) : isHeaderConfigChange(tu, ifile); } - update= update || force || (checkTimestamps && fResolver.getLastModified(ifl) != ifile.getTimestamp()); + update= update || force || isModified(checkTimestamps, checkFileContentsHash, ifl, tu, ifile); if (update) { requestUpdate(linkageID, ifl, ifile); store(tu, linkageID, isSourceUnit, files); @@ -423,7 +424,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter { if (checkConfig) { update= isHeaderConfigChange(tu, ifile); } - update= update || force || (checkTimestamps && fResolver.getLastModified(ifl) != ifile.getTimestamp()); + update= update || force || isModified(checkTimestamps, checkFileContentsHash, ifl, tu, ifile); if (update) { final int linkageID = ifile.getLinkageID(); requestUpdate(linkageID, ifl, ifile); @@ -437,7 +438,18 @@ public abstract class AbstractIndexerTask extends PDOMWriter { updateRequestedFiles(count - fFilesToUpdate.length); fFilesToUpdate= null; } - + + private boolean isModified(boolean checkTimestamps, boolean checkFileContentsHash, IIndexFileLocation ifl, + Object tu, IIndexFragmentFile file) throws CoreException { + boolean timestampDifferent = checkTimestamps && fResolver.getLastModified(ifl) != file.getTimestamp(); + if (timestampDifferent) { + if (checkFileContentsHash && computeFileContentsHash(tu) == file.getContentsHash()) { + return false; + } + } + return timestampDifferent; + } + private void requestUpdate(int linkageID, IIndexFileLocation ifl, IIndexFragmentFile ifile) { FileKey key= new FileKey(linkageID, ifl.getURI()); IndexFileContent info= fFileInfos.get(key); @@ -589,7 +601,8 @@ public abstract class AbstractIndexerTask extends PDOMWriter { } } } - writeToIndex(linkageID, ast, computeHashCode(scanInfo), monitor); + writeToIndex(linkageID, ast, StreamHasher.hash(code), computeHashCode(scanInfo), + monitor); updateFileCount(0, 0, 1); } } @@ -734,10 +747,11 @@ public abstract class AbstractIndexerTask extends PDOMWriter { pm.subTask(getMessage(MessageKind.parsingFileTask, path.lastSegment(), path.removeLastSegments(1).toString())); long start= System.currentTimeMillis(); - IASTTranslationUnit ast= createAST(tu, lang, scanInfo, fASTOptions, inContext, pm); + FileContent codeReader= fResolver.getCodeReader(tu); + IASTTranslationUnit ast= createAST(tu, lang, codeReader, scanInfo, fASTOptions, inContext, pm); fStatistics.fParsingTime += System.currentTimeMillis() - start; if (ast != null) { - writeToIndex(linkageID, ast, computeHashCode(scanInfo), pm); + writeToIndex(linkageID, ast, codeReader.getContentsHash(), computeHashCode(scanInfo), pm); } } catch (CoreException e) { th= e; @@ -755,8 +769,8 @@ public abstract class AbstractIndexerTask extends PDOMWriter { } } - private void writeToIndex(final int linkageID, IASTTranslationUnit ast, int configHash, - IProgressMonitor pm) throws CoreException, InterruptedException { + private void writeToIndex(final int linkageID, IASTTranslationUnit ast, long fileContentsHash, + int configHash, IProgressMonitor pm) throws CoreException, InterruptedException { HashSet enteredFiles= new HashSet(); ArrayList orderedIFLs= new ArrayList(); @@ -775,7 +789,7 @@ public abstract class AbstractIndexerTask extends PDOMWriter { IIndexFileLocation[] ifls= orderedIFLs.toArray(new IIndexFileLocation[orderedIFLs.size()]); try { - addSymbols(ast, ifls, fIndex, 1, false, configHash, fTodoTaskUpdater, pm); + addSymbols(ast, ifls, fIndex, 1, false, fileContentsHash, configHash, fTodoTaskUpdater, pm); } finally { // mark as updated in any case, 
to avoid parsing files that caused an exception to be thrown. for (IIndexFileLocation ifl : ifls) { @@ -940,6 +954,11 @@ public abstract class AbstractIndexerTask extends PDOMWriter { return result * 31 + key.hashCode(); } + private long computeFileContentsHash(Object tu) { + FileContent codeReader= fResolver.getCodeReader(tu); + return codeReader != null ? codeReader.getContentsHash() : 0; + } + public final IndexFileContent getFileContent(int linkageID, IIndexFileLocation ifl) throws CoreException { if (!needToUpdateHeader(linkageID, ifl)) { IndexFileContent info= getFileInfo(linkageID, ifl); diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/IndexUpdatePolicy.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/IndexUpdatePolicy.java index 48fe8b519e9..b06291087ed 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/IndexUpdatePolicy.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/IndexUpdatePolicy.java @@ -140,7 +140,8 @@ public class IndexUpdatePolicy { } else if (fIndexer != null) { if (oldPolicy == MANUAL) { - task= new PDOMUpdateTask(fIndexer, IIndexManager.UPDATE_CHECK_TIMESTAMPS); + task= new PDOMUpdateTask(fIndexer, + IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH); clearTUs(); } else if (fKind == POST_CHANGE) { diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOM.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOM.java index b687fe1a6d4..8c6049ef55e 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOM.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOM.java @@ -191,10 +191,11 @@ public class PDOM extends PlatformObject implements IPDOM { * 94.0 - new model for storing types, bug 294306. * 95.0 - parameter packs, bug 294730. * 96.0 - storing pack expansions in the template parameter map, bug 294730. + * 97.0 - storing file contents hash in PDOMFile, bug 302083. 
*/ - private static final int MIN_SUPPORTED_VERSION= version(96, 0); - private static final int MAX_SUPPORTED_VERSION= version(96, Short.MAX_VALUE); - private static final int DEFAULT_VERSION = version(96, 0); + private static final int MIN_SUPPORTED_VERSION= version(97, 0); + private static final int MAX_SUPPORTED_VERSION= version(97, Short.MAX_VALUE); + private static final int DEFAULT_VERSION = version(97, 0); private static int version(int major, int minor) { return (major << 16) + minor; diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMManager.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMManager.java index 79eb0631809..ee843c56018 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMManager.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMManager.java @@ -571,7 +571,8 @@ public class PDOMManager implements IWritableIndexManager, IListener { pdom.releaseReadLock(); } if (resume) { - enqueue(new PDOMUpdateTask(indexer, IIndexManager.UPDATE_CHECK_TIMESTAMPS)); + enqueue(new PDOMUpdateTask(indexer, + IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH)); } } return; @@ -592,7 +593,8 @@ public class PDOMManager implements IWritableIndexManager, IListener { IPDOMIndexerTask task= null; if (operation.wasSuccessful()) { - task= new PDOMUpdateTask(indexer, IIndexManager.UPDATE_CHECK_TIMESTAMPS); + task= new PDOMUpdateTask(indexer, + IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH); } else { task= new PDOMRebuildTask(indexer); diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMWriter.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMWriter.java index f81cab32664..16c9686539a 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMWriter.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/PDOMWriter.java @@ -138,15 +138,15 @@ abstract public class PDOMWriter { } /** - * Extracts symbols from the given ast and adds them to the index. + * Extracts symbols from the given AST and adds them to the index. * * When flushIndex is set to false, you must make sure to flush the * index after your last write operation. 
* @since 4.0 */ public void addSymbols(IASTTranslationUnit ast, IIndexFileLocation[] ifls, IWritableIndex index, - int readlockCount, boolean flushIndex, int configHash, ITodoTaskUpdater taskUpdater, - IProgressMonitor pm) throws InterruptedException, CoreException { + int readlockCount, boolean flushIndex, long fileContentsHash, int configHash, + ITodoTaskUpdater taskUpdater, IProgressMonitor pm) throws InterruptedException, CoreException { if (fShowProblems) { fShowInclusionProblems= true; fShowScannerProblems= true; @@ -165,8 +165,8 @@ abstract public class PDOMWriter { resolveNames(symbolMap, ifls, stati, pm); // index update - storeSymbolsInIndex(symbolMap, ifls, ast.getLinkage().getLinkageID(), configHash, contextIncludes, - index, readlockCount, flushIndex, stati, pm); + storeSymbolsInIndex(symbolMap, ifls, ast.getLinkage().getLinkageID(), fileContentsHash, + configHash, contextIncludes, index, readlockCount, flushIndex, stati, pm); if (taskUpdater != null) { taskUpdater.updateTasks(ast.getComments(), ifls); @@ -193,9 +193,10 @@ abstract public class PDOMWriter { } private void storeSymbolsInIndex(final Map symbolMap, IIndexFileLocation[] ifls, - int linkageID, int configHash, HashSet contextIncludes, - IWritableIndex index, int readlockCount, boolean flushIndex, - ArrayList stati, IProgressMonitor pm) throws InterruptedException, CoreException { + int linkageID, long fileContentsHash, int configHash, + HashSet contextIncludes, IWritableIndex index, int readlockCount, + boolean flushIndex, ArrayList stati, IProgressMonitor pm) + throws InterruptedException, CoreException { for (int i= 0; i < ifls.length; i++) { if (pm.isCanceled()) return; @@ -209,7 +210,8 @@ abstract public class PDOMWriter { YieldableIndexLock lock = new YieldableIndexLock(index, readlockCount, flushIndex); lock.acquire(); try { - storeFileInIndex(index, ifl, symbolMap, linkageID, configHash, contextIncludes, lock); + storeFileInIndex(index, ifl, symbolMap, linkageID, fileContentsHash, configHash, + contextIncludes, lock); } catch (RuntimeException e) { th= e; } catch (PDOMNotImplementedError e) { @@ -457,9 +459,9 @@ abstract public class PDOMWriter { } private IIndexFragmentFile storeFileInIndex(IWritableIndex index, IIndexFileLocation location, - Map symbolMap, int linkageID, int configHash, - Set contextIncludes, YieldableIndexLock lock) - throws CoreException, InterruptedException { + Map symbolMap, int linkageID, long fileContentsHash, + int configHash, Set contextIncludes, + YieldableIndexLock lock) throws CoreException, InterruptedException { Set clearedContexts= Collections.emptySet(); IIndexFragmentFile file; long timestamp = fResolver.getLastModified(location); @@ -518,6 +520,7 @@ abstract public class PDOMWriter { } if (SEMI_TRANSACTIONAL_UPDATES) { file.setTimestamp(timestamp); + file.setContentsHash(fileContentsHash); file = index.commitUncommittedFile(); } } finally { diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/dom/PDOMFile.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/dom/PDOMFile.java index ad1fce180b3..d8cdc2cd00b 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/dom/PDOMFile.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/dom/PDOMFile.java @@ -71,11 +71,12 @@ public class PDOMFile implements IIndexFragmentFile { private static final int LOCATION_REPRESENTATION = 16; private static final int LINKAGE_ID= 20; private static final int TIME_STAMP = 24; - private static final 
int SCANNER_CONFIG_HASH= 32; - private static final int LAST_USING_DIRECTIVE= 36; - private static final int FIRST_MACRO_REFERENCE= 40; + private static final int CONTENT_HASH= 32; + private static final int SCANNER_CONFIG_HASH= 40; + private static final int LAST_USING_DIRECTIVE= 44; + private static final int FIRST_MACRO_REFERENCE= 48; - private static final int RECORD_SIZE= 44; + private static final int RECORD_SIZE= 52; public static class Comparator implements IBTreeComparator { private Database db; @@ -223,6 +224,7 @@ public class PDOMFile implements IIndexFragmentFile { } setTimestamp(sourceFile.getTimestamp()); + setContentsHash(sourceFile.getContentsHash()); setScannerConfigurationHashcode(sourceFile.getScannerConfigurationHashcode()); sourceFile.delete(); @@ -271,6 +273,16 @@ public class PDOMFile implements IIndexFragmentFile { db.putLong(record + TIME_STAMP, timestamp); } + public long getContentsHash() throws CoreException { + Database db = fLinkage.getDB(); + return db.getLong(record + CONTENT_HASH); + } + + public void setContentsHash(long hash) throws CoreException { + Database db= fLinkage.getDB(); + db.putLong(record + CONTENT_HASH, hash); + } + public int getScannerConfigurationHashcode() throws CoreException { Database db = fLinkage.getDB(); return db.getInt(record + SCANNER_CONFIG_HASH); diff --git a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/indexer/PDOMIndexerTask.java b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/indexer/PDOMIndexerTask.java index c7841f07931..e1cdf3e7770 100644 --- a/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/indexer/PDOMIndexerTask.java +++ b/core/org.eclipse.cdt.core/parser/org/eclipse/cdt/internal/core/pdom/indexer/PDOMIndexerTask.java @@ -99,7 +99,7 @@ public abstract class PDOMIndexerTask extends AbstractIndexerTask implements IPD setIndexFilesWithoutBuildConfiguration(false); setIndexHeadersWithoutContext(UnusedHeaderStrategy.skip); } - setUpdateFlags(IIndexManager.UPDATE_CHECK_TIMESTAMPS); + setUpdateFlags(IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONTENTS_HASH); setForceFirstFiles(forceFiles.length); } diff --git a/core/org.eclipse.cdt.ui/src/org/eclipse/cdt/internal/ui/actions/UpdateIndexWithModifiedFilesAction.java b/core/org.eclipse.cdt.ui/src/org/eclipse/cdt/internal/ui/actions/UpdateIndexWithModifiedFilesAction.java index a48312b325c..0a75159d855 100644 --- a/core/org.eclipse.cdt.ui/src/org/eclipse/cdt/internal/ui/actions/UpdateIndexWithModifiedFilesAction.java +++ b/core/org.eclipse.cdt.ui/src/org/eclipse/cdt/internal/ui/actions/UpdateIndexWithModifiedFilesAction.java @@ -16,6 +16,7 @@ public class UpdateIndexWithModifiedFilesAction extends AbstractUpdateIndexActio @Override protected int getUpdateOptions() { - return IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONFIGURATION | IIndexManager.UPDATE_EXTERNAL_FILES_FOR_PROJECT; + return IIndexManager.UPDATE_CHECK_TIMESTAMPS | IIndexManager.UPDATE_CHECK_CONFIGURATION | + IIndexManager.UPDATE_EXTERNAL_FILES_FOR_PROJECT | IIndexManager.UPDATE_CHECK_CONTENTS_HASH; } }
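
For reference, the sketch below (not part of the patch) illustrates the decision that AbstractIndexerTask.isModified() now makes when UPDATE_CHECK_TIMESTAMPS and UPDATE_CHECK_CONTENTS_HASH are combined: a newer timestamp alone no longer forces re-indexing as long as the contents hash stored in the index still matches the current contents. StreamHasher.hash() and the flag semantics are taken from the patch; IndexedFileStub, needsReindex() and the sample contents are hypothetical illustrations only.

// Illustrative sketch, not part of this patch. IndexedFileStub stands in for the
// timestamp and CONTENT_HASH values recorded in a PDOMFile; StreamHasher.hash()
// is the static helper added by this change.
import org.eclipse.cdt.internal.core.parser.scanner.StreamHasher;

public class ContentsHashCheckSketch {
	/** Hypothetical stand-in for the values stored in the index record. */
	static class IndexedFileStub {
		final long timestamp;
		final long contentsHash;

		IndexedFileStub(long timestamp, long contentsHash) {
			this.timestamp = timestamp;
			this.contentsHash = contentsHash;
		}
	}

	/**
	 * Mirrors the isModified() decision: re-index only if the timestamp changed and,
	 * when the contents-hash check is enabled, the contents hash changed as well.
	 */
	static boolean needsReindex(IndexedFileStub indexed, long currentTimestamp,
			String currentContents, boolean checkContentsHash) {
		boolean timestampDifferent = currentTimestamp != indexed.timestamp;
		if (timestampDifferent && checkContentsHash
				&& StreamHasher.hash(currentContents) == indexed.contentsHash) {
			return false; // File was regenerated with identical contents; skip re-indexing.
		}
		return timestampDifferent;
	}

	public static void main(String[] args) {
		String contents = "int a;\n";
		IndexedFileStub indexed = new IndexedFileStub(1000L, StreamHasher.hash(contents));
		// Timestamp bumped by a code generator, contents unchanged: no re-indexing.
		System.out.println(needsReindex(indexed, 2000L, contents, true));  // false
		// Without the hash check, the timestamp change alone triggers re-indexing.
		System.out.println(needsReindex(indexed, 2000L, contents, false)); // true
	}
}

Note that computeFileContentsHash() in the patch returns 0 when no code reader is available, and StreamHasher maps an empty stream to 0 as well, so a zero value in this comparison effectively means "empty or unknown contents".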