/***********************************************************************
 * Copyright (c) 2013-2025 General Atomics Integrated Intelligence, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 ***********************************************************************/

package org.locationtech.geomesa.accumulo.tools.data

import com.beust.jcommander.{Parameter, ParameterException, Parameters}
import org.apache.accumulo.core.client.admin.TableOperations
import org.apache.accumulo.core.data.Key
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.accumulo.tools.data.AccumuloCompactCommand.{CompactParams, RangeCompaction}
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.curve.BinnedTime
import org.locationtech.geomesa.index.api.GeoMesaFeatureIndex
import org.locationtech.geomesa.index.index.id.IdIndex
import org.locationtech.geomesa.index.index.z3.Z3Index
import org.locationtech.geomesa.tools.utils.ParameterConverters.DurationConverter
import org.locationtech.geomesa.tools.{Command, RequiredTypeNameParam}
import org.locationtech.geomesa.utils.index.ByteArrays
import org.locationtech.geomesa.utils.text.TextTools
import org.locationtech.geomesa.utils.uuid.Z3UuidGenerator

import java.nio.charset.StandardCharsets
import java.time.{Instant, ZoneOffset, ZonedDateTime}
import java.util.concurrent.{Executors, TimeUnit}
import java.util.{Date, UUID}
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal

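/**
 * Incrementally compacts the index tables backing a GeoMesa feature type. Instead of issuing a
 * single whole-table compaction, the command submits one compaction per tablet range, so work
 * is spread across the configured number of threads. When a time interval is specified via
 * `--from`, ranges that provably contain no data for the interval are skipped.
 *
 * A sketch of a typical invocation (assuming the standard `geomesa-accumulo` CLI distribution;
 * connection flags will vary with your deployment):
 * {{{
 * geomesa-accumulo compact -u user -i instance -z zoo1,zoo2 -c my_catalog -f my_type \
 *   --from '2 weeks' --duration '1 week' --threads 8
 * }}}
 */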
class AccumuloCompactCommand extends AccumuloDataStoreCommand {

  import org.locationtech.geomesa.filter.ff
  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  import scala.collection.JavaConverters._

  override val name = "compact"
  override val params = new CompactParams

  override def execute(): Unit = withDataStore { ds =>
    val sft = ds.getSchema(params.featureName)
    if (sft == null) {
      throw new ParameterException(s"Schema '${params.featureName}' does not exist in the data store")
    }

    val start = System.currentTimeMillis()

    val executor = Executors.newFixedThreadPool(params.threads)

    val ops = ds.client.tableOperations()

    val msg = new StringBuilder(s"Starting incremental compaction using ${params.threads} simultaneous threads")

    val interval = Option(params.from).map { from =>
      val now = System.currentTimeMillis()
      val start = now - from.toMillis
      val end = Option(params.duration).map(d => start + d.toMillis).getOrElse(now)
      def toString(millis: Long) = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC).toString
      msg.append(s" from ${toString(start)}/${toString(end)}")
      (start, end)
    }

    val z3Bins = interval.map { case (s, e) =>
      val toBin = BinnedTime.timeToBinnedTime(sft.getZ3Interval)
      (toBin(s).bin, toBin(e).bin)
    }

    val filter = interval.flatMap { case (s, e) =>
      sft.getDtgField.map { dtg =>
        ff.between(ff.property(dtg), ff.literal(new Date(s)), ff.literal(new Date(e)))
      }
    }

    Command.user.info(msg.toString)

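    /**
     * Builds a filter over candidate split ranges for the given index. Each range is a
     * Seq(start, end), where a null edge is unbounded. If a time interval was requested,
     * ranges that provably fall outside the interval's z3 time bins are dropped for the z3
     * index (and, with --z3-feature-ids, the id index); otherwise all ranges pass through.
     */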
    def filterSplits(index: GeoMesaFeatureIndex[_, _]): Iterator[Seq[Text]] => Iterator[Seq[Text]] = {
      z3Bins match {
        case Some((min, max)) if index.name == Z3Index.name =>
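          // z3 rows start with the shard and table-sharing prefix bytes, followed by a 2-byte
          // epoch bin; reading a short at `offset` recovers the bin. Splits too short to hold
          // a full bin are always kept, since they can't be proven outside the interval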
          val offset = index.keySpace.sharding.length + index.keySpace.sharing.length

          def compareStart(s: Text): Boolean =
            s == null || s.getLength < offset + 2 || ByteArrays.readShort(s.getBytes, offset) <= max
          def compareEnd(e: Text): Boolean =
            e == null || e.getLength < offset + 2 || ByteArrays.readShort(e.getBytes, offset) >= min

          iter => iter.filter { case Seq(s, e) => compareStart(s) && compareEnd(e) }

        case Some((min, max)) if params.z3Ids && index.name == IdIndex.name =>
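          // feature ids written by the Z3FeatureIdGenerator embed the epoch bin in the leading
          // bytes of the UUID, where Z3UuidGenerator.timeBin can recover it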
          val offset = if (sft.isTableSharing) { 1 } else { 0 }
          if (sft.isUuidEncoded) {
            // uuid is already stored in correct binary format
            def compareStart(s: Text): Boolean =
              s == null || s.getLength < offset + 3 || Z3UuidGenerator.timeBin(s.getBytes, offset) <= max
            def compareEnd(e: Text): Boolean =
              e == null || e.getLength < offset + 3 || Z3UuidGenerator.timeBin(e.getBytes, offset) >= min

            iter => iter.filter { case Seq(s, e) => compareStart(s) && compareEnd(e) }
          } else {
            // uuid is stored as a string, must be parsed into a uuid and converted to bytes
            def compareStart(s: Text): Boolean = {
              if (s == null) { true } else {
                try {
                  val uuidString = new String(s.getBytes, offset, s.getLength - offset, StandardCharsets.UTF_8)
                  val uuid = UUID.fromString(uuidString)
                  Z3UuidGenerator.timeBin(ByteArrays.toBytes(uuid.getMostSignificantBits)) <= max
                } catch {
                  case NonFatal(_) => true // split doesn't contain a whole row key
                }
              }
            }
            def compareEnd(e: Text): Boolean = {
              if (e == null) { true } else {
                try {
                  val uuidString = new String(e.getBytes, offset, e.getLength - offset, StandardCharsets.UTF_8)
                  val uuid = UUID.fromString(uuidString)
                  Z3UuidGenerator.timeBin(ByteArrays.toBytes(uuid.getMostSignificantBits)) >= min
                } catch {
                  case NonFatal(_) => true // split doesn't contain a whole row key
                }
              }
            }

            iter => iter.filter { case Seq(s, e) => compareStart(s) && compareEnd(e) }
          }

        case _ =>
          iter => iter
      }
    }

    ds.manager.indices(sft).foreach { index =>
      val filtering = filterSplits(index)

      index.getTablesForQuery(filter).foreach { table =>
        val tableSplits = ops.listSplits(table).asScala.toList

        var count = 0

        if (tableSplits.isEmpty) {
          executor.submit(new RangeCompaction(ops, table, null, null))
          count += 1
        } else {
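          // cover the whole table with ranges: (-inf, split1], (split1, split2], ..., (splitN, +inf)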
          val head = Iterator.single(Seq(null, tableSplits.head))
          val last = Iterator.single(Seq(tableSplits.last, null))
          val middle = if (tableSplits.lengthCompare(1) == 0) { Iterator.empty } else { tableSplits.sliding(2) }

          // filter out ranges by table sharing, if possible
          val splits = if (sft.isTableSharing) {
            val Array(prefix) = sft.getTableSharingBytes // should be one byte
            (head ++ middle ++ last).filter { case Seq(s, e) =>
              (s == null || ByteArrays.UnsignedByteOrdering.compare(s.getBytes.apply(0), prefix) <= 0) &&
                  (e == null || ByteArrays.UnsignedByteOrdering.compare(e.getBytes.apply(0), prefix) >= 0)
            }
          } else {
            head ++ middle ++ last
          }

          // filter out ranges based on our time interval, if possible
          filtering(splits).foreach { case Seq(s, e) =>
            executor.submit(new RangeCompaction(ops, table, s, e))
            count += 1
          }
        }
        Command.user.info(s"Found $count splits for table $table")
      }
    }

    executor.shutdown()
    executor.awaitTermination(Long.MaxValue, TimeUnit.MILLISECONDS)

    Command.user.info(s"Compactions complete in ${TextTools.getTime(start)}")
  }
}

object AccumuloCompactCommand {

  @Parameters(commandDescription = "Incrementally compact tables for a GeoMesa feature type")
  class CompactParams extends RequiredTypeNameParam with AccumuloDataStoreParams {

    @Parameter(names = Array("--threads"), description = "Number of ranges to compact simultaneously")
    var threads: Integer = Int.box(4)

    @Parameter(names = Array("--from"), description = "How long ago to start compacting data, based on the default date attribute, relative to the current time. E.g. '1 day', '2 weeks and 1 hour', etc.", converter = classOf[DurationConverter])
    var from: Duration = _

    @Parameter(names = Array("--duration"), description = "Amount of time to compact data for, based on the default date attribute, relative to '--from'. E.g. '1 day', '2 weeks and 1 hour', etc.", converter = classOf[DurationConverter])
    var duration: Duration = _

    @Parameter(names = Array("--z3-feature-ids"), description = "Only compact ID records corresponding to the time period; assumes features were written with the Z3FeatureIdGenerator")
    var z3Ids: Boolean = false
  }

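  /**
   * Compacts a single range of a table. The call blocks until the compaction completes
   * (`wait = true`), so each executor thread works through one range at a time.
   */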
  class RangeCompaction(ops: TableOperations, table: String, start: Text, end: Text) extends Runnable {
    override def run(): Unit = {
      Command.user.info(s"Starting compaction of $table [ ${rowToString(start)} :: ${rowToString(end)} ]")
      ops.compact(table, start, end, false, true) // flush = false, wait = true
    }
  }

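  /** Renders a row key for log messages, escaping non-printable bytes */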
  private def rowToString(row: Text): String = {
    if (row == null) { "null" } else { Key.toPrintableString(row.getBytes, 0, row.getLength, row.getLength) }
  }
}