Autopsy  4.19.3
Graphical digital forensics platform for The Sleuth Kit and other tools.
LuceneQuery.java
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011-2017 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.keywordsearch;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrRequest.METHOD;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.CursorMarkParams;
import org.sleuthkit.autopsy.coreutils.EscapeUtil;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.Version;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.Score;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskException;

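/**
 * Performs a Lucene query of the Solr keyword search index and packages the
 * matching documents as keyword hits.
 */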
class LuceneQuery implements KeywordSearchQuery {

    private static final Logger logger = Logger.getLogger(LuceneQuery.class.getName());
    private String keywordStringEscaped;
    private boolean isEscaped;
    private final Keyword originalKeyword;
    private final KeywordList keywordList;
    private final List<KeywordQueryFilter> filters = new ArrayList<>();
    private String field = null;
    private static final int MAX_RESULTS_PER_CURSOR_MARK = 512;
    static final int SNIPPET_LENGTH = 50;
    static final String HIGHLIGHT_FIELD = Server.Schema.TEXT.toString();

    private static final boolean DEBUG = (Version.getBuildType() == Version.Type.DEVELOPMENT);

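    /**
     * Constructs a Lucene query for the given keyword.
     *
     * @param keywordList The list to which the keyword belongs.
     * @param keyword     The keyword to search for.
     */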
    LuceneQuery(KeywordList keywordList, Keyword keyword) {
        this.keywordList = keywordList;
        this.originalKeyword = keyword;
        this.keywordStringEscaped = this.originalKeyword.getSearchTerm();
    }

    @Override
    public void addFilter(KeywordQueryFilter filter) {
        this.filters.add(filter);
    }

    @Override
    public void setField(String field) {
        this.field = field;
    }

    @Override
    public void setSubstringQuery() {
        // Note that this is not a full substring search. Normally substring
        // searches will be done with TermComponentQuery objects instead.
        keywordStringEscaped += "*";
    }

    @Override
    public void escape() {
        keywordStringEscaped = KeywordSearchUtil.escapeLuceneQuery(originalKeyword.getSearchTerm());
        isEscaped = true;
    }

    @Override
    public boolean isEscaped() {
        return isEscaped;
    }

    @Override
    public boolean isLiteral() {
        return originalKeyword.searchTermIsLiteral();
    }

    @Override
    public String getEscapedQueryString() {
        return this.keywordStringEscaped;
    }

    @Override
    public String getQueryString() {
        return this.originalKeyword.getSearchTerm();
    }

    @Override
    public KeywordList getKeywordList() {
        return keywordList;
    }

    @Override
    public QueryResults performQuery() throws KeywordSearchModuleException, NoOpenCoreException {

        final Server solrServer = KeywordSearch.getServer();
        double indexSchemaVersion = NumberUtils.toDouble(solrServer.getIndexInfo().getSchemaVersion());

        SolrQuery solrQuery = createAndConfigureSolrQuery(KeywordSearchSettings.getShowSnippets());

        final String strippedQueryString = StringUtils.strip(getQueryString(), "\"");

        String cursorMark = CursorMarkParams.CURSOR_MARK_START;
        boolean allResultsProcessed = false;
        List<KeywordHit> matches = new ArrayList<>();
        LanguageSpecificContentQueryHelper.QueryResults languageSpecificQueryResults = new LanguageSpecificContentQueryHelper.QueryResults();
        while (!allResultsProcessed) {
            solrQuery.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark);
            QueryResponse response = solrServer.query(solrQuery, SolrRequest.METHOD.POST);
            SolrDocumentList resultList = response.getResults();
            // objectId_chunk -> "text" -> List of previews
            Map<String, Map<String, List<String>>> highlightResponse = response.getHighlighting();

            if (2.2 <= indexSchemaVersion) {
                languageSpecificQueryResults.highlighting.putAll(response.getHighlighting());
            }

            for (SolrDocument resultDoc : resultList) {
                if (2.2 <= indexSchemaVersion) {
                    Object language = resultDoc.getFieldValue(Server.Schema.LANGUAGE.toString());
                    if (language != null) {
                        LanguageSpecificContentQueryHelper.updateQueryResults(languageSpecificQueryResults, resultDoc);
                    }
                }

                try {
                    /*
                     * For each result doc, check that the first occurrence of
                     * the term is before the window. If all of the occurrences
                     * start within the window, don't record them for this
                     * chunk; they will get picked up in the next one.
                     */
                    final String docId = resultDoc.getFieldValue(Server.Schema.ID.toString()).toString();
                    final Integer chunkSize = (Integer) resultDoc.getFieldValue(Server.Schema.CHUNK_SIZE.toString());
                    final Collection<Object> content = resultDoc.getFieldValues(Server.Schema.CONTENT_STR.toString());

                    // If the document has a language, the hit will be in the
                    // language-specific content fields instead, so skip it here.
                    if (resultDoc.containsKey(Server.Schema.LANGUAGE.toString())) {
                        continue;
                    }

                    if (indexSchemaVersion < 2.0) {
                        // Old schema versions don't support the chunk_size or content_str fields, so just accept hits.
                        matches.add(createKeywordHit(highlightResponse, docId));
                    } else {
                        // Check against the file name and the actual content separately.
                        for (Object content_obj : content) {
                            String content_str = (String) content_obj;
                            // For new schemas, check that the hit is before the chunk/window boundary.
                            int firstOccurrence = StringUtils.indexOfIgnoreCase(content_str, strippedQueryString);
                            // There is no chunk_size field for "parent" entries in the index.
                            if (chunkSize == null || chunkSize == 0 || (firstOccurrence > -1 && firstOccurrence < chunkSize)) {
                                matches.add(createKeywordHit(highlightResponse, docId));
                            }
                        }
                    }
                } catch (TskException ex) {
                    throw new KeywordSearchModuleException(ex);
                }
            }
            String nextCursorMark = response.getNextCursorMark();
            if (cursorMark.equals(nextCursorMark)) {
                allResultsProcessed = true;
            }
            cursorMark = nextCursorMark;
        }

        List<KeywordHit> mergedMatches;
        if (2.2 <= indexSchemaVersion) {
            mergedMatches = LanguageSpecificContentQueryHelper.mergeKeywordHits(matches, originalKeyword, languageSpecificQueryResults);
        } else {
            mergedMatches = matches;
        }

        QueryResults results = new QueryResults(this);
        // In the case of a single-term literal query there is only one term.
        results.addResult(new Keyword(originalKeyword.getSearchTerm(), true, true, originalKeyword.getListName(), originalKeyword.getOriginalTerm()), mergedMatches);

        return results;
    }

    @Override
    public boolean validate() {
        return StringUtils.isNotBlank(originalKeyword.getSearchTerm());
    }

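    /**
     * Converts the given keyword hit into a keyword hit analysis result
     * (blackboard artifact) for the given content.
     *
     * @param content      The content the hit was found in.
     * @param foundKeyword The keyword that was actually matched.
     * @param hit          The keyword hit.
     * @param snippet      A preview snippet surrounding the hit, may be null.
     * @param listName     The name of the keyword list, may be blank.
     * @param ingestJobId  The ingest job ID.
     *
     * @return The artifact that was created, or null if it could not be
     *         created.
     */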
    @Override
    public BlackboardArtifact createKeywordHitArtifact(Content content, Keyword foundKeyword, KeywordHit hit, String snippet, String listName, Long ingestJobId) {
        return createKeywordHitArtifact(content, originalKeyword, foundKeyword, hit, snippet, listName, ingestJobId);
    }

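    /**
     * Creates a keyword hit analysis result (blackboard artifact) for the
     * given content, adding attributes for the matched keyword, the preview
     * snippet, the keyword list name, and the search type.
     *
     * @param content      The content the hit was found in.
     * @param originalKW   The keyword as originally defined, may be null.
     * @param foundKeyword The keyword that was actually matched.
     * @param hit          The keyword hit.
     * @param snippet      A preview snippet surrounding the hit, may be null.
     * @param listName     The name of the keyword list, may be blank.
     * @param ingestJobId  The ingest job ID.
     *
     * @return The artifact that was created, or null if an error occurred.
     */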
    public static BlackboardArtifact createKeywordHitArtifact(Content content, Keyword originalKW, Keyword foundKeyword, KeywordHit hit, String snippet, String listName, Long ingestJobId) {
        final String MODULE_NAME = KeywordSearchModuleFactory.getModuleName();

        Collection<BlackboardAttribute> attributes = new ArrayList<>();
        if (snippet != null) {
            attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW, MODULE_NAME, snippet));
        }
        attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_KEYWORD, MODULE_NAME, foundKeyword.getSearchTerm().toLowerCase()));
        if (StringUtils.isNotBlank(listName)) {
            attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_SET_NAME, MODULE_NAME, listName));
        }

        if (originalKW != null) {
            BlackboardAttribute.ATTRIBUTE_TYPE selType = originalKW.getArtifactAttributeType();
            if (selType != null) {
                attributes.add(new BlackboardAttribute(selType, MODULE_NAME, foundKeyword.getSearchTerm()));
            }

            if (originalKW.searchTermIsWholeWord()) {
                attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE, MODULE_NAME, KeywordSearch.QueryType.LITERAL.ordinal()));
            } else {
                attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE, MODULE_NAME, KeywordSearch.QueryType.SUBSTRING.ordinal()));
            }
        }

        hit.getArtifactID().ifPresent(artifactID
                -> attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT, MODULE_NAME, artifactID))
        );

        try {
            return content.newAnalysisResult(
                    BlackboardArtifact.Type.TSK_KEYWORD_HIT, Score.SCORE_LIKELY_NOTABLE,
                    null, listName, null,
                    attributes)
                    .getAnalysisResult();
        } catch (TskCoreException e) {
            logger.log(Level.WARNING, "Error adding bb artifact for keyword hit", e); //NON-NLS
            return null;
        }
    }

    /**
     * Creates the Solr query object for the stored keyword.
     *
     * @param snippets True if the query should request snippets.
     *
     * @return The configured SolrQuery.
     */
    private SolrQuery createAndConfigureSolrQuery(boolean snippets) throws NoOpenCoreException, KeywordSearchModuleException {
        double indexSchemaVersion = NumberUtils.toDouble(KeywordSearch.getServer().getIndexInfo().getSchemaVersion());

        SolrQuery q = new SolrQuery();
        q.setShowDebugInfo(DEBUG); //debug
        // Wrap the query string in quotes if this is a literal search term.
        String queryStr = originalKeyword.searchTermIsLiteral()
                ? KeywordSearchUtil.quoteQuery(keywordStringEscaped) : keywordStringEscaped;

        // Run the query against an optional alternative field.
        if (field != null) {
            // Use the optional field.
            queryStr = field + ":" + queryStr;
            q.setQuery(queryStr);
        } else if (2.2 <= indexSchemaVersion && originalKeyword.searchTermIsLiteral()) {
            q.setQuery(LanguageSpecificContentQueryHelper.expandQueryString(queryStr));
        } else {
            q.setQuery(queryStr);
        }
        q.setRows(MAX_RESULTS_PER_CURSOR_MARK);
        // Setting the sort order is necessary for cursor-based paging to work.
        q.setSort(SolrQuery.SortClause.asc(Server.Schema.ID.toString()));

        q.setFields(Server.Schema.ID.toString(),
                Server.Schema.CHUNK_SIZE.toString(),
                Server.Schema.CONTENT_STR.toString());

        if (2.2 <= indexSchemaVersion && originalKeyword.searchTermIsLiteral()) {
            q.addField(Server.Schema.LANGUAGE.toString());
            LanguageSpecificContentQueryHelper.configureTermfreqQuery(q, keywordStringEscaped);
        }

        for (KeywordQueryFilter filter : filters) {
            q.addFilterQuery(filter.toString());
        }

        if (snippets) {
            configureQueryForHighlighting(q);
        }

        return q;
    }

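    /**
     * Configures the given query to request highlighting (snippet) data.
     *
     * @param q The query to configure.
     *
     * @throws NoOpenCoreException If there is no open Solr core.
     */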
    private static void configureQueryForHighlighting(SolrQuery q) throws NoOpenCoreException {
        double indexSchemaVersion = NumberUtils.toDouble(KeywordSearch.getServer().getIndexInfo().getSchemaVersion());
        if (2.2 <= indexSchemaVersion) {
            for (Server.Schema field : LanguageSpecificContentQueryHelper.getQueryFields()) {
                q.addHighlightField(field.toString());
            }
        } else {
            q.addHighlightField(HIGHLIGHT_FIELD);
        }

        q.setHighlightSnippets(1);
        q.setHighlightFragsize(SNIPPET_LENGTH);

        // Tune the highlighter.
        q.setParam("hl.useFastVectorHighlighter", "on"); //fast highlighter scales better than standard one NON-NLS
        q.setParam("hl.tag.pre", "&laquo;"); //makes sense for FastVectorHighlighter only NON-NLS
        q.setParam("hl.tag.post", "&laquo;"); //makes sense for FastVectorHighlighter only NON-NLS
        q.setParam("hl.fragListBuilder", "simple"); //makes sense for FastVectorHighlighter only NON-NLS

        // Solr bug: if fragCharSize is smaller than the query string, a StringIndexOutOfBoundsException is thrown.
        q.setParam("hl.fragCharSize", Integer.toString(q.getQuery().length())); //makes sense for FastVectorHighlighter only NON-NLS

        // The docs say this only makes sense for the original Highlighter, but that is not really the case.
        // Analyze all content. SLOW! Consider lowering.
        q.setParam("hl.maxAnalyzedChars", Server.HL_ANALYZE_CHARS_UNLIMITED); //NON-NLS
    }

    private KeywordHit createKeywordHit(Map<String, Map<String, List<String>>> highlightResponse, String docId) throws TskException {
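        /*
         * Get the first snippet from the highlighting response if keyword
         * search is configured to show snippets.
         */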
        String snippet = "";
        if (KeywordSearchSettings.getShowSnippets()) {
            List<String> snippetList = highlightResponse.get(docId).get(Server.Schema.TEXT.toString());
            // list is null if there wasn't a snippet
            if (snippetList != null) {
                snippet = EscapeUtil.unEscapeHtml(snippetList.get(0)).trim();
            }
        }

        return new KeywordHit(docId, snippet, originalKeyword.getSearchTerm());
    }

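    /**
     * Gets a snippet for the first hit of the given query in the given Solr
     * document (no chunk ID).
     *
     * @param query        The query string.
     * @param solrObjectId The Solr object ID of the document.
     * @param isRegex      Whether the query is a regular expression query.
     * @param group        Whether to wrap the query in quotes.
     *
     * @return The snippet, or an empty string if none was found.
     *
     * @throws NoOpenCoreException If there is no open Solr core.
     */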
    static String querySnippet(String query, long solrObjectId, boolean isRegex, boolean group) throws NoOpenCoreException {
        return querySnippet(query, solrObjectId, 0, isRegex, group);
    }

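    /**
     * Gets a snippet for the first hit of the given query in the given Solr
     * document chunk.
     *
     * @param query        The query string.
     * @param solrObjectId The Solr object ID of the document.
     * @param chunkID      The chunk ID, or 0 for the parent document.
     * @param isRegex      Whether the query is a regular expression query.
     * @param group        Whether to wrap the query in quotes.
     *
     * @return The snippet, or an empty string if none was found.
     *
     * @throws NoOpenCoreException If there is no open Solr core.
     */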
    static String querySnippet(String query, long solrObjectId, int chunkID, boolean isRegex, boolean group) throws NoOpenCoreException {
        SolrQuery q = new SolrQuery();
        q.setShowDebugInfo(DEBUG); //debug

        String queryStr;
        if (isRegex) {
            queryStr = HIGHLIGHT_FIELD + ":"
                    + (group ? KeywordSearchUtil.quoteQuery(query)
                            : query);
        } else {
            /*
             * Simplify the query/escaping and use the default field; always
             * force grouping/quotes.
             */
            queryStr = KeywordSearchUtil.quoteQuery(query);
        }
        q.setQuery(queryStr);

        String contentIDStr = (chunkID == 0)
                ? Long.toString(solrObjectId)
                : Server.getChunkIdString(solrObjectId, chunkID);
        String idQuery = Server.Schema.ID.toString() + ":" + KeywordSearchUtil.escapeLuceneQuery(contentIDStr);
        q.addFilterQuery(idQuery);

        configureQueryForHighlighting(q);

        Server solrServer = KeywordSearch.getServer();

        try {
            QueryResponse response = solrServer.query(q, METHOD.POST);
            Map<String, Map<String, List<String>>> responseHighlight = response.getHighlighting();
            Map<String, List<String>> responseHighlightID = responseHighlight.get(contentIDStr);
            if (responseHighlightID == null) {
                return "";
            }
            double indexSchemaVersion = NumberUtils.toDouble(solrServer.getIndexInfo().getSchemaVersion());
            List<String> contentHighlights;
            if (2.2 <= indexSchemaVersion) {
                contentHighlights = LanguageSpecificContentQueryHelper.getHighlights(responseHighlightID).orElse(null);
            } else {
                contentHighlights = responseHighlightID.get(LuceneQuery.HIGHLIGHT_FIELD);
            }
            if (contentHighlights == null) {
                return "";
            } else {
                // The extracted content is HTML-escaped, but the snippet goes into a plain text field.
                return EscapeUtil.unEscapeHtml(contentHighlights.get(0)).trim();
            }
        } catch (NoOpenCoreException ex) {
            logger.log(Level.SEVERE, "Error executing Lucene Solr Query: " + query + ". Solr doc id " + solrObjectId + ", chunkID " + chunkID, ex); //NON-NLS
            throw ex;
        } catch (KeywordSearchModuleException ex) {
            logger.log(Level.SEVERE, "Error executing Lucene Solr Query: " + query + ". Solr doc id " + solrObjectId + ", chunkID " + chunkID, ex); //NON-NLS
            return "";
        }
    }
}
