More sample code:

import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
public class LuceneTest{
   public static void main(String[] args) {
      // Construct a RAMDirectory to hold the in-memory representation
      // of the index.
      RAMDirectory idx = new RAMDirectory();
      try {
         // Make an writer to create the index
         IndexWriter writer =
                 new IndexWriter(idx, 
                         new StandardAnalyzer(Version.LUCENE_30), 
         // Add some Document objects containing quotes
         writer.addDocument(createDocument("Theodore Roosevelt",
                 "It behooves every man to remember that the work of the " +
                         "critic, is of altogether secondary importance, and that, " +
                         "in the end, progress is accomplished by the man who does " +
         writer.addDocument(createDocument("Friedrich Hayek",
                 "The case for individual freedom rests largely on the " +
                         "recognition of the inevitable and universal ignorance " +
                         "of all of us concerning a great many of the factors on " +
                         "which the achievements of our ends and welfare depend."));
         writer.addDocument(createDocument("Ayn Rand",
                 "There is nothing to take a man's freedom away from " +
                         "him, save other men. To be free, a man must be free " +
                         "of his brothers."));
         writer.addDocument(createDocument("Mohandas Gandhi",
                 "Freedom is not worth having if it does not connote " +
                         "freedom to err."));
         // Optimize and close the writer to finish building the index
         // Build an IndexSearcher using the in-memory index
         Searcher searcher = new IndexSearcher(idx);
         // Run some queries
         search(searcher, "freedom");
         search(searcher, "free");
         search(searcher, "progress or achievements");
      catch (IOException ioe) {
         // In this example we aren't really doing an I/O, so this
         // exception should never actually be thrown.
      catch (ParseException pe) {
    * Make a Document object with an un-indexed title field and an
    * indexed content field.
   private static Document createDocument(String title, String content) {
      Document doc = new Document();
      // Add the title as an unindexed field...
      doc.add(new Field("title", title, Field.Store.YES, Field.Index.NO));
      // ...and the content as an indexed field. Note that indexed
      // Text fields are constructed using a Reader. Lucene can read
      // and index very large chunks of text, without storing the
      // entire content verbatim in the index. In this example we
      // can just wrap the content string in a StringReader.
      doc.add(new Field("content", content, Field.Store.YES, Field.Index.ANALYZED));
      return doc;
    * Searches for the given string in the "content" field
   private static void search(Searcher searcher, String queryString)
           throws ParseException, IOException {
      // Build a Query object
      QueryParser parser = new QueryParser(Version.LUCENE_30, 
              new StandardAnalyzer(Version.LUCENE_30));
      Query query = parser.parse(queryString);
      int hitsPerPage = 10;
      // Search for the query
      TopScoreDocCollector collector = TopScoreDocCollector.create(5 * hitsPerPage, false);, collector);
      ScoreDoc[] hits = collector.topDocs().scoreDocs;
      int hitCount = collector.getTotalHits();
      System.out.println(hitCount + " total matching documents");
      // Examine the Hits object to see if there were any matches
      if (hitCount == 0) {
                 "No matches were found for "" + queryString + """);
      } else {
         System.out.println("Hits for "" +
                 queryString + "" were found in quotes by:");
         // Iterate over the Documents in the Hits object
         for (int i = 0; i < hitCount; i++) {
            ScoreDoc scoreDoc = hits[i];
            int docId = scoreDoc.doc;
            float docScore = scoreDoc.score;
            System.out.println("docId: " + docId + "t" + "docScore: " + docScore);
            Document doc = searcher.doc(docId);
            // Print the value that we stored in the "title" field. Note
            // that this Field was not indexed, but (unlike the
            // "contents" field) was stored verbatim and can be
            // retrieved.
            System.out.println("  " + (i + 1) + ". " + doc.get("title"));
            System.out.println("Content: " + doc.get("content"));            





The post Lucene In-Memory Search Example and Sample Code appeared first on The Big Data Blog.

Source: Lucene In-Memory Search Example and Sample Code

Leave a Reply

Your email address will not be published. Required fields are marked *


1 2 3
December 22nd, 2015

Phonegap PHP MySQL Example | Apache Cordova

PhoneGap PHP MySQL example Phonegap PHP MySQL Example In this article, I would like to write an article about PhoneGap […]

December 20th, 2015

Install apache zeppelin service in ambari

Setup the Ambari service To deploy the Zeppelin service, run below on ambari server sudo git clone /var/lib/ambari-server/resources/stacks/HDP/$VERSION/services/ZEPPELIN   […]

December 17th, 2015

Facebook’s software architecture

I had summarized/discussed a couple papers (Haystack, Memcache caching) about Facebook’s architecture before. Facebook uses simple architecture that gets things […]

December 5th, 2015

Anaconda: Free enterprise-ready Python for Big data, Predictive Analytics

125+ cross-platform tested and optimized Python packages for advanced analytics totally free, even for commercial use. Completely free enterprise-ready Python […]

December 3rd, 2015

The Hadoop Distributed File System

The Hadoop Distributed File System (HDFS) is designed to store very large data sets reliably, and to stream those data […]

November 28th, 2015


Retail is one of the most important business domains for data science and data mining applications because of its prolific […]

November 26th, 2015


The shortcomings and drawbacks of batch-oriented data processing were widely recognized by the Big Data community quite a long time […]

November 26th, 2015

Spark and Storm face new competition for real-time Hadoop processing

Real-time processing of streaming data in Hadoop typically comes down to choosing between two projects: Storm or Spark. But a […]

November 21st, 2015

Walmart Recruiting II: Sales in Stormy Weather

Predict how sales of weather-sensitive products are affected by snow and rain Walmart operates 11,450 stores in 27 countries, managing inventory […]

November 21st, 2015

Restaurant Revenue Prediction Kaggle solution

Predict annual restaurant sales based on objective measurements With over 1,200 quick service restaurants across the globe, TFI is the company […]