/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.mllib.fpm

import java.{util => ju}

import scala.collection.mutable

import org.apache.spark.{HashPartitioner, Logging, Partitioner, SparkException}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

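/**
 * Model trained by [[FPGrowth]], which holds frequent itemsets.
 * @param freqItemsets frequent itemsets as an RDD of (itemset, frequency) pairs
 */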
class FPGrowthModel(val freqItemsets: RDD[(Array[String], Long)]) extends Serializable

/**
 * This class implements the Parallel FP-growth algorithm to mine frequent patterns in input data.
 * Parallel FP-growth (PFP) partitions computation in such a way that each machine executes an
 * independent group of mining tasks. More details on this algorithm can be found at
 * [[http://dx.doi.org/10.1145/1454008.1454027, PFP]], and the original FP-growth paper can be
 * found at [[http://dx.doi.org/10.1145/335191.335372, FP-growth]].
 *
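 * Example usage (a minimal sketch; `sc` is assumed to be an existing SparkContext and the
 * transactions are illustrative):
 * {{{
 *   val transactions: RDD[Array[String]] = sc.parallelize(Seq(
 *     Array("a", "b", "c"),
 *     Array("a", "b"),
 *     Array("b", "c"),
 *     Array("a")))
 *   val model = new FPGrowth()
 *     .setMinSupport(0.5)
 *     .setNumPartitions(2)
 *     .run(transactions)
 *   model.freqItemsets.collect().foreach { case (itemset, count) =>
 *     println(itemset.mkString("[", ",", "]") + ": " + count)
 *   }
 * }}}
 *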
 * @param minSupport the minimal support level of a frequent pattern; any pattern that appears
 *                   more than (minSupport * size-of-the-dataset) times will be output
 * @param numPartitions number of partitions used by parallel FP-growth
 */
class FPGrowth private (
    private var minSupport: Double,
    private var numPartitions: Int) extends Logging with Serializable {

  /**
   * Constructs an FPGrowth instance with default parameters:
   * {minSupport: 0.3, numPartitions: auto}
   */
  def this() = this(0.3, -1)

  /**
   * Sets the minimal support level (default: 0.3).
   */
  def setMinSupport(minSupport: Double): this.type = {
    this.minSupport = minSupport
    this
  }

  /**
   * Sets the number of partitions used by parallel FP-growth (default: same as input data).
   */
  def setNumPartitions(numPartitions: Int): this.type = {
    this.numPartitions = numPartitions
    this
  }

  /**
   * Computes an FP-Growth model that contains frequent itemsets.
   * @param data input data set, where each element contains a transaction
   * @return an [[FPGrowthModel]]
   */
  def run(data: RDD[Array[String]]): FPGrowthModel = {
    if (data.getStorageLevel == StorageLevel.NONE) {
      logWarning("Input data is not cached.")
    }
    val count = data.count()
    val minCount = math.ceil(minSupport * count).toLong
    val numParts = if (numPartitions > 0) numPartitions else data.partitions.length
    val partitioner = new HashPartitioner(numParts)
    val freqItems = genFreqItems(data, minCount, partitioner)
    val freqItemsets = genFreqItemsets(data, minCount, freqItems, partitioner)
    new FPGrowthModel(freqItemsets)
  }

  /**
   * Generates frequent items by filtering the input data using the minimal support level.
   * @param data input transactions
   * @param minCount minimum count for frequent items
   * @param partitioner partitioner used to distribute items
   * @return array of frequent items, ordered by descending frequency
   */
  private def genFreqItems(
      data: RDD[Array[String]],
      minCount: Long,
      partitioner: Partitioner): Array[String] = {
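    // Count each item once per transaction (items must be unique within a transaction),
    // keep those that reach minCount, and return them sorted by descending frequency.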
    data.flatMap { t =>
      val uniq = t.toSet
      if (t.length != uniq.size) {
        throw new SparkException(s"Items in a transaction must be unique but got ${t.toSeq}.")
      }
      t
    }.map(v => (v, 1L))
      .reduceByKey(partitioner, _ + _)
      .filter(_._2 >= minCount)
      .collect()
      .sortBy(-_._2)
      .map(_._1)
  }

  /**
   * Generates frequent itemsets by building FP-Trees; the extraction is done on each partition.
   * @param data transactions
   * @param minCount minimum count for frequent itemsets
   * @param freqItems frequent items
   * @param partitioner partitioner used to distribute transactions
   * @return an RDD of (frequent itemset, count)
   */
  private def genFreqItemsets(
      data: RDD[Array[String]],
      minCount: Long,
      freqItems: Array[String],
      partitioner: Partitioner): RDD[(Array[String], Long)] = {
    val itemToRank = freqItems.zipWithIndex.toMap
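    // Re-encode each item as its dense integer rank, then redistribute conditional
    // transactions so that one FP-Tree can be built per group (partition).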
    data.flatMap { transaction =>
      genCondTransactions(transaction, itemToRank, partitioner)
    }.aggregateByKey(new FPTree[Int], partitioner.numPartitions)(
      (tree, transaction) => tree.add(transaction, 1L),
      (tree1, tree2) => tree1.merge(tree2))
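      // Extract only the patterns whose suffix item hashes to this group, so each
      // frequent pattern is emitted exactly once across partitions, then map the
      // integer ranks back to the original item strings.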
      .flatMap { case (part, tree) =>
        tree.extract(minCount, x => partitioner.getPartition(x) == part)
      }.map { case (ranks, count) =>
        (ranks.map(i => freqItems(i)).toArray, count)
      }
  }

  /**
   * Generates conditional transactions.
   * @param transaction a transaction
   * @param itemToRank map from items to their ranks
   * @param partitioner partitioner used to distribute transactions
   * @return a map of (target partition, conditional transaction)
   */
  private def genCondTransactions(
      transaction: Array[String],
      itemToRank: Map[String, Int],
      partitioner: Partitioner): mutable.Map[Int, Array[Int]] = {
    val output = mutable.Map.empty[Int, Array[Int]]
    // Keep only frequent items in the basket and sort the remaining ranks.
    val filtered = transaction.flatMap(itemToRank.get)
    ju.Arrays.sort(filtered)
    val n = filtered.length
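    // Walk the sorted ranks from highest to lowest; the prefix ending at each item is
    // the conditional transaction for that item's partition. Only the first (longest)
    // prefix per partition is kept, since shorter prefixes are redundant projections.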
    var i = n - 1
    while (i >= 0) {
      val item = filtered(i)
      val part = partitioner.getPartition(item)
      if (!output.contains(part)) {
        output(part) = filtered.slice(0, i + 1)
      }
      i -= 1
    }
    output
  }
}