Code example #1
File: Utility.java  Project: pichitpr/Trimmed_SPMF
 public static SequenceDatabase load(List<List<Integer>>[] aryListDB) {
   SequenceDatabase db = new SequenceDatabase();
   Sequence seq;
   for (int i = 0; i < aryListDB.length; i++) {
     if (aryListDB[i] != null) {
       seq = new Sequence(i);
       for (List<Integer> itemset : aryListDB[i]) {
         seq.addItemset(itemset);
       }
       db.addSequence(seq);
     }
   }
   return db;
 }
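
The array index doubles as the sequence id and null slots are skipped, so the input can be built as a sparse array. Below is a minimal usage sketch under that reading; the LoadFromArrayDemo class, the data, and the omitted SPMF package imports are hypothetical, not part of the project.

import java.util.Arrays;
import java.util.List;

public class LoadFromArrayDemo {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // the array index is the sequence id; slot 1 is left null and is skipped by load()
    List<List<Integer>>[] raw = new List[3];
    raw[0] = Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3)); // sequence 0: (1 2)(3)
    raw[2] = Arrays.asList(Arrays.asList(2), Arrays.asList(3, 4)); // sequence 2: (2)(3 4)

    SequenceDatabase db = Utility.load(raw);
    db.print(); // print the two loaded sequences
  }
}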
Code example #2
  public static void main(String[] arg) throws IOException {
    // Load a sequence database
    SequenceDatabase sequenceDatabase = new SequenceDatabase();
    sequenceDatabase.loadFile(fileToPath("contextPrefixSpan.txt"));
    sequenceDatabase.print();

    int minsup = 2; // we use a minsup of 2 sequences (50 % of the database size)

    AlgoBIDEPlus algo = new AlgoBIDEPlus();

    // execute the algorithm
    algo.runAlgorithm(sequenceDatabase, "C://patterns//closed_sequential_patterns.txt", minsup);
    algo.printStatistics(sequenceDatabase.size());
  }
Code example #3
File: AlgoBIDEPlus.java  Project: vikasmb/DataZoomer
 /**
  * For each item, find the IDs of the sequences that contain it
  *
  * @param database the current sequence database
  * @return a map associating each item with the set of IDs of the sequences containing it
  */
 private Map<Integer, Set<Integer>> findSequencesContainingItems(SequenceDatabase database) {
   // We use a map to store the sequence IDs where an item appears
   // Key: item   Value: a set of sequence IDs
   Map<Integer, Set<Integer>> mapSequenceID =
       new HashMap<Integer, Set<Integer>>(); // keeps <item ID, set of IDs of sequences containing it>
   // for each sequence
   for (Sequence sequence : database.getSequences()) {
     // for each itemset in that sequence
     for (List<Integer> itemset : sequence.getItemsets()) {
       // for each item
       for (Integer item : itemset) {
         // get the set of sequence ids for that item
         Set<Integer> sequenceIDs = mapSequenceID.get(item);
         if (sequenceIDs == null) {
           // if null create a new set
           sequenceIDs = new HashSet<Integer>();
           mapSequenceID.put(item, sequenceIDs);
         }
         // add the current sequence id to this set
         sequenceIDs.add(sequence.getId());
       }
     }
   }
   return mapSequenceID;
 }
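
The null check followed by put is the pre-Java-8 accumulation idiom; on Java 8 and later the same grouping can be written with Map.computeIfAbsent. A sketch of an equivalent inner loop, not part of the original file:

    // Java 8+ equivalent of the null-check-then-put idiom, using the same variables as above
    for (Sequence sequence : database.getSequences()) {
      for (List<Integer> itemset : sequence.getItemsets()) {
        for (Integer item : itemset) {
          // create the set the first time the item is seen, then record the sequence id
          mapSequenceID.computeIfAbsent(item, k -> new HashSet<Integer>()).add(sequence.getId());
        }
      }
    }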
Code example #4
  public static void main(String[] arg) throws IOException {
    String outputPath = ".//output.txt";
    // Load a sequence database
    SequenceDatabase sequenceDatabase = new SequenceDatabase();
    sequenceDatabase.loadFile(fileToPath("contextPrefixSpan.txt"));
    // print the database to console
    sequenceDatabase.print();

    // Create an instance of the PrefixSpan algorithm
    AlgoPrefixSpan algo = new AlgoPrefixSpan();

    int minsup = 2; // we use a minimum support of 2 sequences.

    // execute the algorithm
    algo.runAlgorithm(sequenceDatabase, outputPath, minsup);
    algo.printStatistics(sequenceDatabase.size());
  }
Code example #5
  public static void main(String[] arg) throws IOException {
    // Load a sequence database
    SequenceDatabase sequenceDatabase = new SequenceDatabase();
    sequenceDatabase.loadFile(fileToPath("contextPrefixSpan.txt"));
    sequenceDatabase.print();
    // Create an instance of the algorithm
    AlgoMaxSP algo = new AlgoMaxSP();

    // if you set the following parameter to true, the sequence ids of the sequences where
    // each pattern appears will be shown in the result
    boolean showSequenceIdentifiers = false;

    // execute the algorithm
    SequentialPatterns patterns = algo.runAlgorithm(sequenceDatabase, null, 2);
    algo.printStatistics(sequenceDatabase.size());
    patterns.printFrequentPatterns(sequenceDatabase.size(), showSequenceIdentifiers);
  }
Code example #6
  public static void main(String[] arg) throws IOException {
    // Load a sequence database
    SequenceDatabase sequenceDatabase = new SequenceDatabase();
    sequenceDatabase.loadFile(fileToPath("contextPrefixSpan.txt"));
    //		sequenceDatabase.print();

    int minsup = 2; // we use a minsup of 2 sequences (50 % of the database size)

    AlgoMaxSP algo = new AlgoMaxSP();

    // if you set the following parameter to true, the sequence ids of the sequences where
    // each pattern appears will be shown in the result
    algo.setShowSequenceIdentifiers(false);

    // execute the algorithm
    algo.runAlgorithm(sequenceDatabase, ".//output.txt", minsup);
    algo.printStatistics(sequenceDatabase.size());
  }
Code example #7
  public static void main(String[] arg) throws IOException {
    // Load a sequence database
    SequenceDatabase sequenceDatabase = new SequenceDatabase();
    sequenceDatabase.loadFile(fileToPath("contextPrefixSpan.txt"));
    // print the database to console
    sequenceDatabase.print();

    // Create an instance of the algorithm
    AlgoFSGP algo = new AlgoFSGP();
    //		algo.setMaximumPatternLength(3);

    // execute the algorithm with minsup = 50 %
    boolean performPruning = true; // activate pruning of the search space
    List<SequentialPattern> patterns = algo.runAlgorithm(sequenceDatabase, 0.5, performPruning);
    algo.printStatistics(sequenceDatabase.size());
    System.out.println(" == PATTERNS ==");
    for (SequentialPattern pattern : patterns) {
      System.out.println(pattern + " support : " + pattern.getAbsoluteSupport());
    }
  }
Code example #8
File: Utility.java  Project: pichitpr/Trimmed_SPMF
 public static SequenceDatabase load(String strDB) {
   SequenceDatabase db = new SequenceDatabase();
   Sequence seq;
   List<Integer> iset;
   // each line of the input string encodes one sequence
   String[] sequences = strDB.split("\\n");
   String[] itemsets;
   String[] items;
   for (String seqStr : sequences) {
     // itemsets are separated by "|"; the first token is the sequence id
     itemsets = seqStr.trim().split("\\s*\\|\\s*");
     seq = new Sequence(Integer.valueOf(itemsets[0]));
     for (int i = 1; i < itemsets.length; i++) {
       // items within an itemset are separated by whitespace
       items = itemsets[i].split("\\s+");
       iset = new ArrayList<Integer>();
       for (String itemStr : items) {
         iset.add(Integer.valueOf(itemStr));
       }
       seq.addItemset(iset);
     }
     db.addSequence(seq);
   }
   return db;
 }
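
Judging from the parsing logic, each line of strDB encodes one sequence as "<sequence id> | itemset | itemset | ...", with the items of an itemset separated by whitespace. A short, hypothetical input in that format (the numbers are invented):

    // Hypothetical input: two sequences; "|" separates itemsets, spaces separate items,
    // and the first token of each line is the sequence id.
    String strDB = "1 | 10 20 | 30\n"
                 + "2 | 20 | 30 40 50";
    SequenceDatabase db = Utility.load(strDB);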
Code example #9
File: AlgoBIDEPlus.java  Project: vikasmb/DataZoomer
  /**
   * This is the main method for the BIDE+ algorithm.
   *
   * @param database a sequence database
   * @throws IOException if an error occurs while writing the output file.
   */
  private void bide(SequenceDatabase database, String outputFilePath) throws IOException {
    // if the user wants to keep the result in memory
    if (outputFilePath == null) {
      writer = null;
      patterns = new SequentialPatterns("FREQUENT SEQUENTIAL PATTERNS");
    } else { // if the user wants to save the result to a file
      patterns = null;
      writer = new BufferedWriter(new FileWriter(outputFilePath));
    }

    // The algorithm first scans the database to find all frequent items
    // and notes the sequences in which these items appear.
    // This is stored in a map:  Key: item   Value: IDs of sequences containing the item
    Map<Integer, Set<Integer>> mapSequenceID = findSequencesContainingItems(database);

    // WE CONVERT THE DATABASE TO A PSEUDO-DATABASE, AND REMOVE
    // THE ITEMS OF SIZE 1 THAT ARE NOT FREQUENT, SO THAT THE ALGORITHM
    // WILL NOT CONSIDER THEM ANYMORE.

    // OPTIMIZATION Create COOC MAP
    //		coocMapBefore = new HashMap<Integer, Map<Integer,
    // Integer>>(mapSequenceID.entrySet().size());

    // we create a database
    initialDatabase = new ArrayList<PseudoSequenceBIDE>();
    // for each sequence of the original database
    for (Sequence sequence : database.getSequences()) {
      // we make a copy of the sequence while removing infrequent items
      Sequence optimizedSequence = sequence.cloneSequenceMinusItems(mapSequenceID, minsuppAbsolute);
      if (optimizedSequence.size() != 0) {
        // if this sequence has size >0, we add it to the new database
        initialDatabase.add(new PseudoSequenceBIDE(optimizedSequence, 0, 0));
      }

      //			// update COOC map
      //			HashSet<Integer> alreadySeen = new HashSet<Integer>();
      //			for(List<Integer> itemset : optimizedSequence.getItemsets()) {
      //				for(Integer item : itemset) {
      //					Map<Integer, Integer> mapCoocItem = coocMapBefore.get(item);
      //					if(mapCoocItem == null) {
      //						mapCoocItem = new HashMap<Integer, Integer>();
      //						coocMapBefore.put(item, mapCoocItem);
      //					}
      //					for(Integer itemSeen : alreadySeen) {
      //						if(itemSeen != item) {
      //							Integer frequency = mapCoocItem.get(itemSeen);
      //							if(frequency == null) {
      //								mapCoocItem.put(itemSeen, 1);
      //							}else {
      //								mapCoocItem.put(itemSeen, frequency+1);
      //							}
      //						}
      //					}
      //					alreadySeen.add(item);
      //				}
      //			}
    }

    // For each frequent item
    loop1:
    for (Entry<Integer, Set<Integer>> entry : mapSequenceID.entrySet()) {
      // if the item is frequent
      if (entry.getValue().size() >= minsuppAbsolute) {
        //				Map<Integer, Integer> mapCoocItem = coocMapBefore.get(entry.getKey());
        //				if(mapCoocItem != null) {
        //					for(Integer supportCoocBefore : mapCoocItem.values()) {
        //						if(supportCoocBefore >= entry.getValue().size()) {
        //							continue loop1;
        //						}
        //					}
        //				}

        // build the projected database with this item
        Integer item = entry.getKey();
        List<PseudoSequenceBIDE> projectedContext =
            buildProjectedContextSingleItem(item, initialDatabase, false, entry.getValue());

        // Create the prefix with this item
        SequentialPattern prefix = new SequentialPattern();
        prefix.addItemset(new Itemset(item));
        // set the sequence IDS of this prefix
        prefix.setSequenceIDs(entry.getValue());

        // variable to store the largest support of patterns
        // that will be found starting with this prefix
        if (projectedContext.size() >= minsuppAbsolute) {
          int successorSupport = 0;

          if (!checkBackScanPruning(prefix, entry.getValue())) {
            successorSupport = recursion(prefix, projectedContext); // recursion
          }

          // Finally, because this prefix has support >= minsup
          // and passed the backscan pruning,
          // we check whether it has no successor with support >= minsup
          // (a forward extension)
          // IF no forward extension
          if (successorSupport != entry.getValue().size()) { // ######### MODIFICATION ####
            // IF there is also no backward extension
            if (!checkBackwardExtension(prefix, entry.getValue())) {
              // the pattern is closed and we save it
              savePattern(prefix);
            }
          }
        } else {
          if (!checkBackwardExtension(prefix, entry.getValue())) {
            // the pattern is closed and we save it
            savePattern(prefix);
          }
        }
      }
    }
    // check the memory usage for statistics
    MemoryLogger.getInstance().checkMemory();
  }