1 /***********************************************************************
2  * Copyright (c) 2013-2024 Commonwealth Computer Research, Inc.
3  * All rights reserved. This program and the accompanying materials
4  * are made available under the terms of the Apache License, Version 2.0
5  * which accompanies this distribution and is available at
6  * http://www.opensource.org/licenses/apache2.0.php.
7  ***********************************************************************/
8 
9 package org.locationtech.geomesa.fs.storage.common.metadata
10 
11 import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine, LoadingCache}
12 import com.typesafe.config._
13 import com.typesafe.scalalogging.LazyLogging
14 import org.apache.hadoop.fs.Options.CreateOpts
15 import org.apache.hadoop.fs._
16 import org.geotools.api.feature.simple.SimpleFeatureType
17 import org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata
18 import org.locationtech.geomesa.fs.storage.api._
19 import org.locationtech.geomesa.fs.storage.common.utils.PathCache
20 import org.locationtech.geomesa.utils.concurrent.{CachedThreadPool, PhaserUtils}
21 import org.locationtech.geomesa.utils.io.WithClose
22 import org.locationtech.geomesa.utils.stats.MethodProfiling
23 import org.locationtech.geomesa.utils.text.StringSerialization
24 
25 import java.io.{FileNotFoundException, InputStreamReader}
26 import java.nio.charset.StandardCharsets
27 import java.util.UUID
28 import java.util.concurrent._
29 import java.util.function.BiFunction
30 import scala.collection.mutable.ArrayBuffer
31 import scala.runtime.BoxedUnit
32 import scala.util.control.NonFatal
33 
34 /**
35  * StorageMetadata implementation. Saves changes as a series of timestamped changelogs to allow
36  * concurrent modifications. The current state is obtained by replaying all the logs.
37  *
38  * Note that state is not read off disk until 'reload' is called.
39  *
40  * When accessed through the standard factory methods, the state will periodically reload from disk
41  * in order to pick up external changes (every 10 minutes by default).
42  *
43  * Note that modifications made to the metadata may not be immediately available, if they occur
44  * simultaneously with a reload. For example, calling `getPartition` immediately after `addPartition` may
45  * not return anything. However, the change is always persisted to disk, and will be available after the next
46  * reload. In general this does not cause problems, as reads and writes happen in different JVMs (ingest
47  * vs query).
48  *
49  * @param fc file context
50  * @param directory metadata root path
51  * @param sft simple feature type
52  * @param meta basic metadata config
53  * @param converter file converter
54  */
55 class FileBasedMetadata(
56     private val fc: FileContext,
57     val directory: Path,
58     val sft: SimpleFeatureType,
59     private val meta: Metadata,
60     private val converter: MetadataConverter
61   ) extends StorageMetadata with MethodProfiling with LazyLogging {
62 
63   import FileBasedMetadata._
64 
65   import scala.collection.JavaConverters._
66 
67   private val expiry = PathCache.CacheDurationProperty.toDuration.get.toMillis
68 
69   // cache of files associated with each partition
70   // we use a cache to provide lazy non-blocking refresh, but the cache will only ever have 1 element in it
71   private val partitions: LoadingCache[BoxedUnit, ConcurrentMap[String, PartitionFiles]] =
72     Caffeine.newBuilder().refreshAfterWrite(expiry, TimeUnit.MILLISECONDS).build(
73       new CacheLoader[BoxedUnit, ConcurrentMap[String, PartitionFiles]]() {
74         override def load(key: BoxedUnit): ConcurrentMap[String, PartitionFiles] = readPartitionFiles(8)
75       }
76     )
77 
78   // cache of parsed metadata, keyed by partition
79   private val metadata: LoadingCache[String, PartitionMetadata] =
80     Caffeine.newBuilder().refreshAfterWrite(expiry, TimeUnit.MILLISECONDS).build(
81       new CacheLoader[String, PartitionMetadata]() {
82         override def load(key: String): PartitionMetadata =
83           Option(partitions.get(BoxedUnit.UNIT).get(key)).flatMap(readPartition(_, 8)).map(_.toMetadata).orNull
84       }
85     )
86 
87   override val scheme: PartitionScheme = PartitionSchemeFactory.load(sft, meta.scheme)
88   override val encoding: String = meta.config(Metadata.Encoding)
89   override val leafStorage: Boolean = meta.config(Metadata.LeafStorage).toBoolean
90 
91   private val kvs = new ConcurrentHashMap[String, String](meta.config.asJava)
92 
93   override def get(key: String): Option[String] = Option(kvs.get(key))
94 
95   override def set(key: String, value: String): Unit = {
96     kvs.put(key, value)
97     FileBasedMetadataFactory.write(fc, directory.getParent, meta.copy(config = kvs.asScala.toMap))
98   }
99 
100   override def getPartitions(prefix: Option[String]): Seq[PartitionMetadata] = {
101     partitions.get(BoxedUnit.UNIT).asScala.toStream.flatMap { case (p, _) =>
102       if (prefix.forall(p.startsWith)) { Option(metadata.get(p)) } else { None }
103     }
104   }
105 
106   override def getPartition(name: String): Option[PartitionMetadata] = Option(metadata.get(name))
107 
108   override def addPartition(partition: PartitionMetadata): Unit = {
109     val config = {
110       val action = PartitionAction.Add
111       val envelope = partition.bounds.map(b => EnvelopeConfig(b.envelope)).getOrElse(Seq.empty)
112       PartitionConfig(partition.name, action, partition.files, partition.count, envelope, System.currentTimeMillis())
113     }
114     val path = writePartition(config)
115     // if we have already loaded the partition, merge in the new value
116     if (metadata.getIfPresent(partition.name) != null) {
117       metadata.asMap.merge(partition.name, partition, addMetadata)
118     }
119     Option(partitions.getIfPresent(BoxedUnit.UNIT)).foreach { files =>
120       files.merge(partition.name, PartitionFiles(config = Seq(config), parsed = Seq(path)), addFiles)
121     }
122   }
123 
124   override def removePartition(partition: PartitionMetadata): Unit = {
125     val config = {
126       val action = PartitionAction.Remove
127       val envelope = partition.bounds.map(b => EnvelopeConfig(b.envelope)).getOrElse(Seq.empty)
128       PartitionConfig(partition.name, action, partition.files, partition.count, envelope, System.currentTimeMillis())
129     }
130     val path = writePartition(config)
131     // if we have already loaded the partition, merge in the new value
132     if (metadata.getIfPresent(partition.name) != null) {
133       metadata.asMap.merge(partition.name, partition, removeMetadata)
134     }
135     Option(partitions.getIfPresent(BoxedUnit.UNIT)).foreach { files =>
136       files.merge(partition.name, PartitionFiles(config = Seq(config), parsed = Seq(path)), addFiles)
137     }
138   }
139 
140   override def setPartitions(partitions: Seq[PartitionMetadata]): Unit = {
141     val map = new ConcurrentHashMap[String, PartitionFiles]
142     this.partitions.put(BoxedUnit.UNIT, map)
143     this.metadata.invalidateAll()
144 
145     val configs = partitions.map { partition =>
146       val action = PartitionAction.Add
147       val envelope = partition.bounds.map(b => EnvelopeConfig(b.envelope)).getOrElse(Seq.empty)
148       val config = PartitionConfig(partition.name, action, partition.files, partition.count, envelope, System.currentTimeMillis())
149       // note: side effects in map
150       this.metadata.put(partition.name, partition)
151       map.put(partition.name, PartitionFiles(config = Seq(config)))
152       config
153     }
154 
155     writeCompactedConfig(configs)
156     delete(readPartitionFiles(8).asScala.flatMap { case (_, f) => f.unparsed ++ f.parsed }, 8)
157   }
158 
159   override def compact(partition: Option[String], fileSize: Option[Long], threads: Int): Unit = {
160     require(threads > 0, "Threads must be a positive number")
161 
162     // in normal usage, we never pass in a partition to this method
163     partition.foreach(p => logger.warn(s"Ignoring requested partition '$p' and compacting all metadata"))
164 
165     val configs = ArrayBuffer.empty[PartitionConfig]
166     val paths = ArrayBuffer.empty[Path]
167 
168     readPartitionFiles(threads).asScala.foreach { case (name, f) =>
169       val config = readPartition(f, threads).filter(_.files.nonEmpty)
170       config match {
171         case None => metadata.invalidate(name)
172         case Some(c) =>
173           metadata.put(name, c.toMetadata)
174           configs += c
175       }
176       paths ++= f.unparsed
177       paths ++= f.parsed
178     }
179 
180     writeCompactedConfig(configs.toSeq)
181     delete(paths, threads)
182 
183     partitions.invalidate(BoxedUnit.UNIT)
184   }
185 
186   override def invalidate(): Unit = {
187     partitions.invalidateAll()
188     metadata.invalidateAll()
189   }
190 
191   override def close(): Unit = {}
192 
193   /**
194    * Serialize a partition config to disk
195    *
196    * @param config config
197    * @return
198    */
199   private def writePartition(config: PartitionConfig): Path = {
200     val data = profile("Serialized partition configuration") {
201       converter.renderPartition(config)
202     }
203     profile("Persisted partition configuration") {
204       val encoded = StringSerialization.alphaNumericSafeString(config.name)
205       val name = s"$UpdatePartitionPrefix$encoded-${UUID.randomUUID()}${converter.suffix}"
206       val file = new Path(directory, name)
207       WithClose(fc.create(file, java.util.EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent)) { out =>
208         out.write(data.getBytes(StandardCharsets.UTF_8))
209         out.hflush()
210         out.hsync()
211       }
212       PathCache.register(fc, file)
213       file
214     }
215   }
216 
217   /**
218    * Write metadata for a compacted set of partition operations to disk
219    *
220    * @param config partition config
221    */
222   private def writeCompactedConfig(config: Seq[PartitionConfig]): Unit = {
223     val data = profile("Serialized compacted partition configuration") {
224       converter.renderCompaction(config)
225     }
226     profile("Persisted compacted partition configuration") {
227       val file = new Path(directory, CompactedPrefix + converter.suffix)
228       val flags = java.util.EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
229       WithClose(fc.create(file, flags, CreateOpts.createParent)) { out =>
230         out.write(data.getBytes(StandardCharsets.UTF_8))
231         out.hflush()
232         out.hsync()
233       }
234       PathCache.register(fc, file)
235       // generally we overwrite the existing file but if we change rendering the name will change
236       val toRemove =
237         new Path(directory, if (converter.suffix == HoconPathSuffix) { CompactedJson } else { CompactedHocon })
238       if (PathCache.exists(fc, toRemove, reload = true)) {
239         fc.delete(toRemove, false)
240         PathCache.invalidate(fc, toRemove)
241       }
242     }
243   }
244 
245   /**
246    * Reads all the metadata files and groups them by partition, parsing them only if needed
247    * to determine the partition name
248    *
249    * @param threads threads
250    * @return
251    */
252   private def readPartitionFiles(threads: Int): ConcurrentMap[String, PartitionFiles] = {
253     val result = new ConcurrentHashMap[String, PartitionFiles]()
254 
255     // list all the metadata files on disk
256     profile("Listed metadata files") {
257       val pool = new CachedThreadPool(threads)
258       // use a phaser to track worker thread completion
259       val phaser = new Phaser(2) // 1 for the initial directory worker + 1 for this thread
260       pool.submit(new DirectoryWorker(pool, phaser, fc.listStatus(directory), result))
261       // wait for the worker threads to complete
262       try {
263         phaser.awaitAdvanceInterruptibly(phaser.arrive())
264       } finally {
265         pool.shutdown()
266       }
267     }
268 
269     result
270   }
271 
272   /**
273    * Parses and merges the config files for a given partition
274    *
275    * @param files files associated with the partition
276    * @param threads threads
277    * @return
278    */
279   private def readPartition(files: PartitionFiles, threads: Int): Option[PartitionConfig] = {
280     val updates = if (threads < 2) {
281       files.unparsed.flatMap(readPartitionConfig)
282     } else {
283       val ec = new CachedThreadPool(threads)
284       try {
285         val results = Seq.newBuilder[PartitionConfig]
286         def readOne(p: Path): Unit = {
287           readPartitionConfig(p).foreach { c =>
288             results.synchronized(results += c)
289           }
290         }
291         files.unparsed.toList.map(p => ec.submit(new Runnable() { override def run(): Unit = readOne(p)})).foreach(_.get)
292         results.result
293       } finally {
294         ec.shutdown()
295       }
296     }
297     mergePartitionConfigs(updates ++ files.config).filter(_.files.nonEmpty)
298   }
299 
300   /**
301    * Read and parse a partition metadata file
302    *
303    * @param file file path
304    * @return
305    */
306   private def readPartitionConfig(file: Path): Option[PartitionConfig] = {
307     try {
308       val config = profile("Loaded partition configuration") {
309         WithClose(new InputStreamReader(fc.open(file), StandardCharsets.UTF_8)) { in =>
310           ConfigFactory.parseReader(in, ConfigParseOptions.defaults().setSyntax(getSyntax(file.getName)))
311         }
312       }
313       profile("Parsed partition configuration") {
314         Some(converter.parsePartition(config))
315       }
316     } catch {
317       case NonFatal(e) => logger.error(s"Error reading config at path $file:", e); None
318     }
319   }
320 
321   /**
322    * Read and parse a compacted partition metadata file
323    *
324    * @param file compacted config file
325    * @return
326    */
327   private def readCompactedConfig(file: Path): Seq[PartitionConfig] = {
328     try {
329       val config = profile("Loaded compacted partition configuration") {
330         WithClose(new InputStreamReader(fc.open(file), StandardCharsets.UTF_8)) { in =>
331           ConfigFactory.parseReader(in, ConfigParseOptions.defaults().setSyntax(getSyntax(file.getName)))
332         }
333       }
334       profile("Parsed compacted partition configuration") {
335         converter.parseCompaction(config)
336       }
337     } catch {
338       case NonFatal(e) => logger.error(s"Error reading config at path $file:", e); Seq.empty
339     }
340   }
341 
342   /**
343    * Delete a seq of paths
344    *
345    * @param paths paths to delete
346    * @param threads number of threads to use
347    */
348   private def delete(paths: Iterable[Path], threads: Int): Unit = {
349     if (threads < 2) {
350       paths.foreach(fc.delete(_, false))
351     } else {
352       val ec = new CachedThreadPool(threads)
353       try {
354         paths.toList.map(p => ec.submit(new Runnable() { override def run(): Unit = fc.delete(p, false)})).foreach(_.get)
355       } finally {
356         ec.shutdown()
357       }
358     }
359   }
360 
361   private class DirectoryWorker(
362       es: ExecutorService,
363       phaser: Phaser,
364       listDirectory: => RemoteIterator[FileStatus],
365       result: ConcurrentHashMap[String, PartitionFiles]
366     ) extends Runnable {
367 
368     override def run(): Unit = {
369       try {
370         var i = phaser.getRegisteredParties + 1
371         val iter = listDirectory
372         while (iter.hasNext && i < PhaserUtils.MaxParties) {
373           val status = iter.next
374           val path = status.getPath
375           lazy val name = path.getName
376           if (status.isDirectory) {
377             i += 1
378             // use a tiered phaser on each directory avoid the limit of 65535 registered parties
379             es.submit(new DirectoryWorker(es, new Phaser(phaser, 1), fc.listStatus(path), result))
380           } else if (name.startsWith(UpdatePartitionPrefix)) {
381             // pull out the partition name but don't parse the file yet
382             val encoded = name.substring(8, name.length - 42) // strip out prefix and suffix
383             val partition = StringSerialization.decodeAlphaNumericSafeString(encoded)
384             result.merge(partition, PartitionFiles(unparsed = Seq(path)), addFiles)
385           } else if (name == CompactedHocon || name == CompactedJson) {
386             i += 1
387             phaser.register() // register the new worker thread
388             es.submit(new CompactedParser(phaser, path, result))
389           } else if (name.startsWith(UpdateFilePrefix) && name.endsWith(JsonPathSuffix)) {
390             // old update files - have to parse them to get the partition name
391             i += 1
392             phaser.register() // register the new worker thread
393             es.submit(new UpdateParser(phaser, path, result))
394           }
395         }
396         if (iter.hasNext) {
397           es.submit(new DirectoryWorker(es, new Phaser(phaser, 1), iter, result))
398         }
399       } catch {
400         case _: FileNotFoundException => // the partition dir was deleted... just return
401         case NonFatal(e) => logger.error("Error scanning metadata directory:", e)
402       } finally {
403         phaser.arrive() // notify that this thread is done
404       }
405     }
406   }
407 
408   private class CompactedParser(phaser: Phaser, path: Path, result: ConcurrentHashMap[String, PartitionFiles])
409       extends Runnable {
410 
411     override def run(): Unit = {
412       try {
413         readCompactedConfig(path).foreach { config =>
414           // note: don't track the path since it's from the compacted config file
415           result.merge(config.name, PartitionFiles(config = Seq(config)), addFiles)
416         }
417       } catch {
418         case _: FileNotFoundException => // the file was deleted... just return
419         case NonFatal(e) => logger.error("Error reading compacted metadata entry:", e)
420       } finally {
421         phaser.arrive() // notify that this thread is done
422       }
423     }
424   }
425 
426   private class UpdateParser(phaser: Phaser, path: Path, result: ConcurrentHashMap[String, PartitionFiles])
427       extends Runnable {
428 
429     override def run(): Unit = {
430       try {
431         readPartitionConfig(path).foreach { config =>
432           result.merge(config.name, PartitionFiles(config = Seq(config), parsed = Seq(path)), addFiles)
433         }
434       } catch {
435         case _: FileNotFoundException => // the file was deleted... just return
436         case NonFatal(e) => logger.error("Error reading metadata update entry:", e)
437       } finally {
438         phaser.arrive() // notify that this thread is done
439       }
440     }
441   }
442 }
443 
444 object FileBasedMetadata {
445 
446   val MetadataType = "file"
447   val DefaultOptions: NamedOptions = NamedOptions(MetadataType, Map(Config.RenderKey -> Config.RenderCompact))
448   val LegacyOptions : NamedOptions = NamedOptions(MetadataType, Map(Config.RenderKey -> Config.RenderPretty))
449 
450   object Config {
451     val RenderKey = "render"
452 
453     val RenderPretty  = "pretty"
454     val RenderCompact = "compact"
455   }
456 
457   private val CompactedPrefix       = "compacted"
458   private val UpdateFilePrefix      = "update-"
459   private val UpdatePartitionPrefix = UpdateFilePrefix + "$"
460   private val JsonPathSuffix        = RenderPretty.suffix
461   private val HoconPathSuffix       = RenderCompact.suffix
462   private val CompactedJson         = CompactedPrefix + JsonPathSuffix
463   private val CompactedHocon        = CompactedPrefix + HoconPathSuffix
464 
465   // function to add/merge an existing partition in an atomic call
466   private val addMetadata = new BiFunction[PartitionMetadata, PartitionMetadata, PartitionMetadata]() {
467     override def apply(existing: PartitionMetadata, update: PartitionMetadata): PartitionMetadata =
468       existing + update
469   }
470 
471   // function to remove/merge an existing partition in an atomic call
472   private val removeMetadata = new BiFunction[PartitionMetadata, PartitionMetadata, PartitionMetadata]() {
473     override def apply(existing: PartitionMetadata, update: PartitionMetadata): PartitionMetadata = {
474       val result = existing - update
475       if (result.files.isEmpty) { null } else { result }
476     }
477   }
478 
479   // function to merge partition files in an atomic call
480   private val addFiles = new BiFunction[PartitionFiles, PartitionFiles, PartitionFiles]() {
481     override def apply(existing: PartitionFiles, update: PartitionFiles): PartitionFiles = {
482       val config = existing.config ++ update.config
483       PartitionFiles(config, existing.parsed ++ update.parsed, existing.unparsed ++ update.unparsed)
484     }
485   }
486 
487   /**
488    * Copy a metadata instance. Discards any cached state
489    *
490    * @param m metadata
491    * @return
492    */
493   def copy(m: FileBasedMetadata): FileBasedMetadata =
494     new FileBasedMetadata(m.fc, m.directory, m.sft, m.meta, m.converter)
495 
496   private def getSyntax(file: String): ConfigSyntax = {
497     if (file.endsWith(HoconPathSuffix)) {
498       ConfigSyntax.CONF
499     } else if (file.endsWith(JsonPathSuffix)) {
500       ConfigSyntax.JSON
501     } else {
502       ConfigSyntax.JSON
503     }
504   }
505 
506   /**
507    * Holder for metadata files for a partition
508    *
509    * @param config any parsed configurations
510    * @param parsed references to the files corresponding to `config`
511    * @param unparsed unparsed config files associated with the partition
512    */
513   private case class PartitionFiles(
514       config: Seq[PartitionConfig] = Seq.empty,
515       parsed: Seq[Path] = Seq.empty,
516       unparsed: Seq[Path] = Seq.empty
517     )
518 }
Line Stmt Id Pos Tree Symbol Tests Code
67 827 2964 - 3019 Select scala.concurrent.duration.Duration.toMillis org.locationtech.geomesa.fs.storage.common.utils.PathCache.CacheDurationProperty.toDuration.get.toMillis
72 828 3315 - 3321 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.expiry FileBasedMetadata.this.expiry
72 829 3323 - 3344 Literal <nosymbol> MILLISECONDS
72 832 3275 - 3547 Apply com.github.benmanes.caffeine.cache.Caffeine.build com.github.benmanes.caffeine.cache.Caffeine.newBuilder().refreshAfterWrite(FileBasedMetadata.this.expiry, MILLISECONDS).build[scala.runtime.BoxedUnit, java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]]({ final class $anon extends Object with com.github.benmanes.caffeine.cache.CacheLoader[scala.runtime.BoxedUnit,java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]] { def <init>(): <$anon: com.github.benmanes.caffeine.cache.CacheLoader[scala.runtime.BoxedUnit,java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]]> = { $anon.super.<init>(); () }; override def load(key: scala.runtime.BoxedUnit): java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles] = FileBasedMetadata.this.readPartitionFiles(8) }; new $anon() })
73 831 3359 - 3362 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
74 830 3512 - 3533 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.readPartitionFiles FileBasedMetadata.this.readPartitionFiles(8)
80 833 3709 - 3715 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.expiry FileBasedMetadata.this.expiry
80 834 3717 - 3738 Literal <nosymbol> MILLISECONDS
80 841 3669 - 3985 Apply com.github.benmanes.caffeine.cache.Caffeine.build com.github.benmanes.caffeine.cache.Caffeine.newBuilder().refreshAfterWrite(FileBasedMetadata.this.expiry, MILLISECONDS).build[String, org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata]({ final class $anon extends Object with com.github.benmanes.caffeine.cache.CacheLoader[String,org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata] { def <init>(): <$anon: com.github.benmanes.caffeine.cache.CacheLoader[String,org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata]> = { $anon.super.<init>(); () }; override def load(key: String): org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata = scala.Option.apply[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](FileBasedMetadata.this.partitions.get(scala.runtime.BoxedUnit.UNIT).get(key)).flatMap[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](((x$1: org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles) => FileBasedMetadata.this.readPartition(x$1, 8))).map[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](((x$2: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => x$2.toMetadata)).orNull[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](scala.Predef.$conforms[Null]) }; new $anon() })
81 840 3753 - 3756 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
83 835 3877 - 3916 Apply java.util.Map.get FileBasedMetadata.this.partitions.get(scala.runtime.BoxedUnit.UNIT).get(key)
83 836 3926 - 3945 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.readPartition FileBasedMetadata.this.readPartition(x$1, 8)
83 837 3951 - 3963 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.toMetadata x$2.toMetadata
83 838 3965 - 3965 TypeApply scala.Predef.$conforms scala.Predef.$conforms[Null]
83 839 3870 - 3971 ApplyToImplicitArgs scala.Option.orNull scala.Option.apply[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](FileBasedMetadata.this.partitions.get(scala.runtime.BoxedUnit.UNIT).get(key)).flatMap[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](((x$1: org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles) => FileBasedMetadata.this.readPartition(x$1, 8))).map[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](((x$2: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => x$2.toMetadata)).orNull[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](scala.Predef.$conforms[Null])
87 842 4056 - 4059 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.sft FileBasedMetadata.this.sft
87 843 4061 - 4072 Select org.locationtech.geomesa.fs.storage.api.Metadata.scheme FileBasedMetadata.this.meta.scheme
87 844 4028 - 4073 Apply org.locationtech.geomesa.fs.storage.api.PartitionSchemeFactory.load org.locationtech.geomesa.fs.storage.api.PartitionSchemeFactory.load(FileBasedMetadata.this.sft, FileBasedMetadata.this.meta.scheme)
88 845 4120 - 4137 Select org.locationtech.geomesa.fs.storage.api.Metadata.Encoding org.locationtech.geomesa.fs.storage.api.`package`.Metadata.Encoding
88 846 4108 - 4138 Apply scala.collection.MapLike.apply FileBasedMetadata.this.meta.config.apply(org.locationtech.geomesa.fs.storage.api.`package`.Metadata.Encoding)
89 847 4189 - 4209 Select org.locationtech.geomesa.fs.storage.api.Metadata.LeafStorage org.locationtech.geomesa.fs.storage.api.`package`.Metadata.LeafStorage
89 848 4177 - 4210 Apply scala.collection.MapLike.apply FileBasedMetadata.this.meta.config.apply(org.locationtech.geomesa.fs.storage.api.`package`.Metadata.LeafStorage)
89 849 4177 - 4220 Select scala.collection.immutable.StringLike.toBoolean scala.Predef.augmentString(FileBasedMetadata.this.meta.config.apply(org.locationtech.geomesa.fs.storage.api.`package`.Metadata.LeafStorage)).toBoolean
91 850 4280 - 4291 Select org.locationtech.geomesa.fs.storage.api.Metadata.config FileBasedMetadata.this.meta.config
91 851 4280 - 4298 Select scala.collection.convert.Decorators.AsJava.asJava scala.collection.JavaConverters.mapAsJavaMapConverter[String, String](FileBasedMetadata.this.meta.config).asJava
91 852 4242 - 4299 Apply java.util.concurrent.ConcurrentHashMap.<init> new java.util.concurrent.ConcurrentHashMap[String,String](scala.collection.JavaConverters.mapAsJavaMapConverter[String, String](FileBasedMetadata.this.meta.config).asJava)
93 853 4358 - 4370 Apply java.util.concurrent.ConcurrentHashMap.get FileBasedMetadata.this.kvs.get(key)
93 854 4351 - 4371 Apply scala.Option.apply scala.Option.apply[String](FileBasedMetadata.this.kvs.get(key))
96 855 4434 - 4453 Apply java.util.concurrent.ConcurrentHashMap.put FileBasedMetadata.this.kvs.put(key, value)
97 856 4489 - 4491 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.fc FileBasedMetadata.this.fc
97 857 4493 - 4512 Apply org.apache.hadoop.fs.Path.getParent FileBasedMetadata.this.directory.getParent()
97 858 4533 - 4536 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.kvs FileBasedMetadata.this.kvs
97 859 4545 - 4545 TypeApply scala.Predef.$conforms scala.Predef.$conforms[(String, String)]
97 860 4533 - 4550 ApplyToImplicitArgs scala.collection.TraversableOnce.toMap scala.collection.JavaConverters.mapAsScalaConcurrentMapConverter[String, String](FileBasedMetadata.this.kvs).asScala.toMap[String, String](scala.Predef.$conforms[(String, String)])
97 861 4514 - 4551 Apply org.locationtech.geomesa.fs.storage.api.Metadata.copy FileBasedMetadata.this.meta.copy(x$2, x$3, x$1)
97 862 4458 - 4552 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadataFactory.write FileBasedMetadataFactory.write(FileBasedMetadata.this.fc, FileBasedMetadata.this.directory.getParent(), { <artifact> val x$1: scala.collection.immutable.Map[String,String] @scala.reflect.internal.annotations.uncheckedBounds = scala.collection.JavaConverters.mapAsScalaConcurrentMapConverter[String, String](FileBasedMetadata.this.kvs).asScala.toMap[String, String](scala.Predef.$conforms[(String, String)]); <artifact> val x$2: org.geotools.api.feature.simple.SimpleFeatureType = FileBasedMetadata.this.meta.copy$default$1; <artifact> val x$3: org.locationtech.geomesa.fs.storage.api.NamedOptions = FileBasedMetadata.this.meta.copy$default$2; FileBasedMetadata.this.meta.copy(x$2, x$3, x$1) })
101 863 4658 - 4672 Select scala.runtime.BoxedUnit.UNIT scala.runtime.BoxedUnit.UNIT
101 864 4643 - 4673 Apply com.github.benmanes.caffeine.cache.LoadingCache.get FileBasedMetadata.this.partitions.get(scala.runtime.BoxedUnit.UNIT)
101 875 4699 - 4699 TypeApply scala.collection.immutable.Stream.canBuildFrom immutable.this.Stream.canBuildFrom[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata]
101 876 4643 - 4802 ApplyToImplicitArgs scala.collection.immutable.Stream.flatMap scala.collection.JavaConverters.mapAsScalaConcurrentMapConverter[String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](FileBasedMetadata.this.partitions.get(scala.runtime.BoxedUnit.UNIT)).asScala.toStream.flatMap[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata, Seq[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata]](((x0$1: (String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)) => x0$1 match { case (_1: String, _2: org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)(String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)((p @ _), _) => if (prefix.forall({ ((x$1: String) => p.startsWith(x$1)) })) scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](scala.Option.apply[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](FileBasedMetadata.this.metadata.get(p))) else scala.this.Option.option2Iterable[Nothing](scala.None) }))(immutable.this.Stream.canBuildFrom[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata])
102 865 4740 - 4752 Apply java.lang.String.startsWith p.startsWith(x$1)
102 866 4726 - 4753 Apply scala.Option.forall prefix.forall({ ((x$1: String) => p.startsWith(x$1)) })
102 867 4764 - 4779 Apply com.github.benmanes.caffeine.cache.LoadingCache.get FileBasedMetadata.this.metadata.get(p)
102 868 4757 - 4780 Apply scala.Option.apply scala.Option.apply[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](FileBasedMetadata.this.metadata.get(p))
102 869 4757 - 4780 ApplyImplicitView scala.Option.option2Iterable scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](scala.Option.apply[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](FileBasedMetadata.this.metadata.get(p)))
102 870 4757 - 4780 Block scala.Option.option2Iterable scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](scala.Option.apply[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](FileBasedMetadata.this.metadata.get(p)))
102 871 4790 - 4794 Select scala.None scala.None
102 872 4790 - 4794 ApplyImplicitView scala.Option.option2Iterable scala.this.Option.option2Iterable[Nothing](scala.None)
102 873 4790 - 4794 Block scala.Option.option2Iterable scala.this.Option.option2Iterable[Nothing](scala.None)
102 874 4722 - 4796 If <nosymbol> if (prefix.forall({ ((x$1: String) => p.startsWith(x$1)) })) scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](scala.Option.apply[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](FileBasedMetadata.this.metadata.get(p))) else scala.this.Option.option2Iterable[Nothing](scala.None)
106 877 4886 - 4904 Apply com.github.benmanes.caffeine.cache.LoadingCache.get FileBasedMetadata.this.metadata.get(name)
106 878 4879 - 4905 Apply scala.Option.apply scala.Option.apply[org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata](FileBasedMetadata.this.metadata.get(name))
110 879 5013 - 5032 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionAction.Add metadata.this.`package`.PartitionAction.Add
111 880 5095 - 5105 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds.envelope b.envelope
111 881 5080 - 5106 Apply org.locationtech.geomesa.fs.storage.common.metadata.EnvelopeConfig.apply metadata.this.`package`.EnvelopeConfig.apply(b.envelope)
111 882 5118 - 5127 TypeApply scala.collection.generic.GenericCompanion.empty scala.collection.Seq.empty[Nothing]
111 883 5054 - 5128 Apply scala.Option.getOrElse partition.bounds.map[Seq[Double]](((b: org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds) => metadata.this.`package`.EnvelopeConfig.apply(b.envelope))).getOrElse[Seq[Double]](scala.collection.Seq.empty[Nothing])
112 884 5151 - 5165 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
112 885 5175 - 5190 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.files partition.files
112 886 5192 - 5207 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.count partition.count
112 887 5219 - 5245 Apply java.lang.System.currentTimeMillis java.lang.System.currentTimeMillis()
112 888 5135 - 5246 Apply org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.apply metadata.this.`package`.PartitionConfig.apply(partition.name, action, partition.files, partition.count, envelope, java.lang.System.currentTimeMillis())
114 889 5268 - 5290 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.writePartition FileBasedMetadata.this.writePartition(config)
116 890 5370 - 5415 Apply java.lang.Object.!= FileBasedMetadata.this.metadata.getIfPresent(partition.name).!=(null)
116 895 5366 - 5366 Literal <nosymbol> ()
116 896 5366 - 5366 Block <nosymbol> ()
117 891 5446 - 5460 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
117 892 5473 - 5484 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.addMetadata FileBasedMetadata.addMetadata
117 893 5425 - 5485 Apply java.util.concurrent.ConcurrentMap.merge FileBasedMetadata.this.metadata.asMap().merge(partition.name, partition, FileBasedMetadata.addMetadata)
117 894 5425 - 5485 Block java.util.concurrent.ConcurrentMap.merge FileBasedMetadata.this.metadata.asMap().merge(partition.name, partition, FileBasedMetadata.addMetadata)
119 897 5527 - 5541 Select scala.runtime.BoxedUnit.UNIT scala.runtime.BoxedUnit.UNIT
119 898 5503 - 5542 Apply com.github.benmanes.caffeine.cache.Cache.getIfPresent FileBasedMetadata.this.partitions.getIfPresent(scala.runtime.BoxedUnit.UNIT)
119 905 5496 - 5670 Apply scala.Option.foreach scala.Option.apply[java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]](FileBasedMetadata.this.partitions.getIfPresent(scala.runtime.BoxedUnit.UNIT)).foreach[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](((files: java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]) => files.merge(partition.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)))
120 899 5581 - 5595 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
120 900 5621 - 5632 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config)
120 901 5643 - 5652 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path)
120 902 5597 - 5653 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path), FileBasedMetadata.PartitionFiles.apply$default$3)
120 903 5655 - 5663 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.addFiles FileBasedMetadata.addFiles
120 904 5569 - 5664 Apply java.util.concurrent.ConcurrentMap.merge files.merge(partition.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)
126 906 5785 - 5807 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionAction.Remove metadata.this.`package`.PartitionAction.Remove
127 907 5870 - 5880 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds.envelope b.envelope
127 908 5855 - 5881 Apply org.locationtech.geomesa.fs.storage.common.metadata.EnvelopeConfig.apply metadata.this.`package`.EnvelopeConfig.apply(b.envelope)
127 909 5893 - 5902 TypeApply scala.collection.generic.GenericCompanion.empty scala.collection.Seq.empty[Nothing]
127 910 5829 - 5903 Apply scala.Option.getOrElse partition.bounds.map[Seq[Double]](((b: org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds) => metadata.this.`package`.EnvelopeConfig.apply(b.envelope))).getOrElse[Seq[Double]](scala.collection.Seq.empty[Nothing])
128 911 5926 - 5940 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
128 912 5950 - 5965 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.files partition.files
128 913 5967 - 5982 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.count partition.count
128 914 5994 - 6020 Apply java.lang.System.currentTimeMillis java.lang.System.currentTimeMillis()
128 915 5910 - 6021 Apply org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.apply metadata.this.`package`.PartitionConfig.apply(partition.name, action, partition.files, partition.count, envelope, java.lang.System.currentTimeMillis())
130 916 6043 - 6065 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.writePartition FileBasedMetadata.this.writePartition(config)
132 917 6145 - 6190 Apply java.lang.Object.!= FileBasedMetadata.this.metadata.getIfPresent(partition.name).!=(null)
132 922 6141 - 6141 Literal <nosymbol> ()
132 923 6141 - 6141 Block <nosymbol> ()
133 918 6221 - 6235 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
133 919 6248 - 6262 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.removeMetadata FileBasedMetadata.removeMetadata
133 920 6200 - 6263 Apply java.util.concurrent.ConcurrentMap.merge FileBasedMetadata.this.metadata.asMap().merge(partition.name, partition, FileBasedMetadata.removeMetadata)
133 921 6200 - 6263 Block java.util.concurrent.ConcurrentMap.merge FileBasedMetadata.this.metadata.asMap().merge(partition.name, partition, FileBasedMetadata.removeMetadata)
135 924 6305 - 6319 Select scala.runtime.BoxedUnit.UNIT scala.runtime.BoxedUnit.UNIT
135 925 6281 - 6320 Apply com.github.benmanes.caffeine.cache.Cache.getIfPresent FileBasedMetadata.this.partitions.getIfPresent(scala.runtime.BoxedUnit.UNIT)
135 932 6274 - 6448 Apply scala.Option.foreach scala.Option.apply[java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]](FileBasedMetadata.this.partitions.getIfPresent(scala.runtime.BoxedUnit.UNIT)).foreach[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](((files: java.util.concurrent.ConcurrentMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]) => files.merge(partition.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)))
136 926 6359 - 6373 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
136 927 6399 - 6410 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config)
136 928 6421 - 6430 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path)
136 929 6375 - 6431 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path), FileBasedMetadata.PartitionFiles.apply$default$3)
136 930 6433 - 6441 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.addFiles FileBasedMetadata.addFiles
136 931 6347 - 6442 Apply java.util.concurrent.ConcurrentMap.merge files.merge(partition.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)
141 933 6543 - 6588 Apply java.util.concurrent.ConcurrentHashMap.<init> new java.util.concurrent.ConcurrentHashMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]()
142 934 6613 - 6627 Select scala.runtime.BoxedUnit.UNIT scala.runtime.BoxedUnit.UNIT
142 935 6593 - 6633 Apply com.github.benmanes.caffeine.cache.Cache.put this.partitions.put(scala.runtime.BoxedUnit.UNIT, map)
143 936 6638 - 6667 Apply com.github.benmanes.caffeine.cache.Cache.invalidateAll this.metadata.invalidateAll()
145 953 6702 - 6702 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]
145 954 6687 - 7155 ApplyToImplicitArgs scala.collection.TraversableLike.map partitions.map[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig, Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]](((partition: org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata) => { val action: org.locationtech.geomesa.fs.storage.common.metadata.PartitionAction.Value = metadata.this.`package`.PartitionAction.Add; val envelope: Seq[Double] = partition.bounds.map[Seq[Double]](((b: org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds) => metadata.this.`package`.EnvelopeConfig.apply(b.envelope))).getOrElse[Seq[Double]](scala.collection.Seq.empty[Nothing]); val config: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig = metadata.this.`package`.PartitionConfig.apply(partition.name, action, partition.files, partition.count, envelope, java.lang.System.currentTimeMillis()); this.metadata.put(partition.name, partition); map.put(partition.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3)); config }))(collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig])
146 937 6736 - 6755 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionAction.Add metadata.this.`package`.PartitionAction.Add
147 938 6818 - 6828 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds.envelope b.envelope
147 939 6803 - 6829 Apply org.locationtech.geomesa.fs.storage.common.metadata.EnvelopeConfig.apply metadata.this.`package`.EnvelopeConfig.apply(b.envelope)
147 940 6841 - 6850 TypeApply scala.collection.generic.GenericCompanion.empty scala.collection.Seq.empty[Nothing]
147 941 6777 - 6851 Apply scala.Option.getOrElse partition.bounds.map[Seq[Double]](((b: org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionBounds) => metadata.this.`package`.EnvelopeConfig.apply(b.envelope))).getOrElse[Seq[Double]](scala.collection.Seq.empty[Nothing])
148 942 6887 - 6901 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
148 943 6911 - 6926 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.files partition.files
148 944 6928 - 6943 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.count partition.count
148 945 6955 - 6981 Apply java.lang.System.currentTimeMillis java.lang.System.currentTimeMillis()
148 946 6871 - 6982 Apply org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.apply metadata.this.`package`.PartitionConfig.apply(partition.name, action, partition.files, partition.count, envelope, java.lang.System.currentTimeMillis())
150 947 7042 - 7056 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
150 948 7024 - 7068 Apply com.github.benmanes.caffeine.cache.Cache.put this.metadata.put(partition.name, partition)
151 949 7083 - 7097 Select org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.name partition.name
151 950 7123 - 7134 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config)
151 951 7099 - 7135 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3)
151 952 7075 - 7136 Apply java.util.concurrent.ConcurrentHashMap.put map.put(partition.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3))
155 955 7161 - 7190 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.writeCompactedConfig FileBasedMetadata.this.writeCompactedConfig(configs)
156 956 7202 - 7223 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.readPartitionFiles FileBasedMetadata.this.readPartitionFiles(8)
156 957 7271 - 7279 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.parsed f.parsed
156 958 7268 - 7268 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]
156 959 7257 - 7279 ApplyToImplicitArgs scala.collection.TraversableLike.++ f.unparsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](f.parsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path])
156 960 7257 - 7279 Block scala.collection.TraversableLike.++ f.unparsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](f.parsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path])
156 961 7240 - 7240 TypeApply scala.collection.mutable.Iterable.canBuildFrom mutable.this.Iterable.canBuildFrom[org.apache.hadoop.fs.Path]
156 962 7202 - 7281 ApplyToImplicitArgs scala.collection.TraversableLike.flatMap scala.collection.JavaConverters.mapAsScalaConcurrentMapConverter[String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](FileBasedMetadata.this.readPartitionFiles(8)).asScala.flatMap[org.apache.hadoop.fs.Path, Iterable[org.apache.hadoop.fs.Path]](((x0$1: (String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)) => x0$1 match { case (_1: String, _2: org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)(String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)(_, (f @ _)) => f.unparsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](f.parsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]) }))(mutable.this.Iterable.canBuildFrom[org.apache.hadoop.fs.Path])
156 963 7283 - 7284 Literal <nosymbol> 8
156 964 7195 - 7285 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.delete FileBasedMetadata.this.delete(scala.collection.JavaConverters.mapAsScalaConcurrentMapConverter[String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](FileBasedMetadata.this.readPartitionFiles(8)).asScala.flatMap[org.apache.hadoop.fs.Path, Iterable[org.apache.hadoop.fs.Path]](((x0$1: (String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)) => x0$1 match { case (_1: String, _2: org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)(String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)(_, (f @ _)) => f.unparsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](f.parsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]) }))(mutable.this.Iterable.canBuildFrom[org.apache.hadoop.fs.Path]), 8)
160 965 7401 - 7412 Apply scala.Int.> threads.>(0)
160 966 7414 - 7449 Literal <nosymbol> "Threads must be a positive number"
160 967 7393 - 7450 Apply scala.Predef.require scala.Predef.require(threads.>(0), "Threads must be a positive number")
163 968 7524 - 7625 Apply scala.Option.foreach partition.foreach[Unit](((p: String) => (if (FileBasedMetadata.this.logger.underlying.isWarnEnabled()) FileBasedMetadata.this.logger.underlying.warn("Ignoring requested partition \'{}\' and compacting all metadata", (p: AnyRef)) else (): Unit)))
165 969 7645 - 7679 TypeApply scala.collection.generic.GenericCompanion.empty scala.collection.mutable.ArrayBuffer.empty[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]
166 970 7696 - 7719 TypeApply scala.collection.generic.GenericCompanion.empty scala.collection.mutable.ArrayBuffer.empty[org.apache.hadoop.fs.Path]
168 971 7725 - 7752 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.readPartitionFiles FileBasedMetadata.this.readPartitionFiles(threads)
168 984 7786 - 8076 Block <nosymbol> { val config: Option[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] = FileBasedMetadata.this.readPartition(f, threads).filter(((x$3: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => x$3.files.nonEmpty)); config match { case scala.None => FileBasedMetadata.this.metadata.invalidate(name) case (value: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig)Some[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]((c @ _)) => { FileBasedMetadata.this.metadata.put(name, c.toMetadata); configs.+=(c) } }; paths.++=(f.unparsed); paths.++=(f.parsed) }
168 985 7725 - 8082 Apply scala.collection.IterableLike.foreach scala.collection.JavaConverters.mapAsScalaConcurrentMapConverter[String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](FileBasedMetadata.this.readPartitionFiles(threads)).asScala.foreach[scala.collection.mutable.ArrayBuffer[org.apache.hadoop.fs.Path]](((x0$1: (String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)) => x0$1 match { case (_1: String, _2: org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)(String, org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles)((name @ _), (f @ _)) => { val config: Option[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] = FileBasedMetadata.this.readPartition(f, threads).filter(((x$3: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => x$3.files.nonEmpty)); config match { case scala.None => FileBasedMetadata.this.metadata.invalidate(name) case (value: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig)Some[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]((c @ _)) => { FileBasedMetadata.this.metadata.put(name, c.toMetadata); configs.+=(c) } }; paths.++=(f.unparsed); paths.++=(f.parsed) } }))
169 972 7841 - 7857 Select scala.collection.TraversableOnce.nonEmpty x$3.files.nonEmpty
169 973 7808 - 7858 Apply scala.Option.filter FileBasedMetadata.this.readPartition(f, threads).filter(((x$3: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => x$3.files.nonEmpty))
171 974 7901 - 7926 Apply com.github.benmanes.caffeine.cache.Cache.invalidate FileBasedMetadata.this.metadata.invalidate(name)
171 975 7901 - 7926 Block com.github.benmanes.caffeine.cache.Cache.invalidate FileBasedMetadata.this.metadata.invalidate(name)
172 979 7948 - 8016 Block <nosymbol> { FileBasedMetadata.this.metadata.put(name, c.toMetadata); configs.+=(c) }
173 976 7980 - 7992 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.toMetadata c.toMetadata
173 977 7961 - 7993 Apply com.github.benmanes.caffeine.cache.Cache.put FileBasedMetadata.this.metadata.put(name, c.toMetadata)
174 978 8004 - 8016 Apply scala.collection.mutable.ArrayBuffer.+= configs.+=(c)
176 980 8041 - 8051 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.unparsed f.unparsed
176 981 8031 - 8051 Apply scala.collection.mutable.ArrayBuffer.++= paths.++=(f.unparsed)
177 982 8068 - 8076 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.parsed f.parsed
177 983 8058 - 8076 Apply scala.collection.mutable.ArrayBuffer.++= paths.++=(f.parsed)
180 986 8109 - 8122 Select scala.collection.SeqLike.toSeq configs.toSeq
180 987 8088 - 8123 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.writeCompactedConfig FileBasedMetadata.this.writeCompactedConfig(configs.toSeq)
181 988 8128 - 8150 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.delete FileBasedMetadata.this.delete(paths, threads)
183 989 8178 - 8192 Select scala.runtime.BoxedUnit.UNIT scala.runtime.BoxedUnit.UNIT
183 990 8156 - 8193 Apply com.github.benmanes.caffeine.cache.Cache.invalidate FileBasedMetadata.this.partitions.invalidate(scala.runtime.BoxedUnit.UNIT)
187 991 8241 - 8267 Apply com.github.benmanes.caffeine.cache.Cache.invalidateAll FileBasedMetadata.this.partitions.invalidateAll()
188 992 8272 - 8296 Apply com.github.benmanes.caffeine.cache.Cache.invalidateAll FileBasedMetadata.this.metadata.invalidateAll()
191 993 8333 - 8335 Literal <nosymbol> ()
200 994 8522 - 8558 Literal <nosymbol> "Serialized partition configuration"
200 996 8514 - 8607 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[String]("Serialized partition configuration")(FileBasedMetadata.this.converter.renderPartition(config))
201 995 8568 - 8601 Apply org.locationtech.geomesa.fs.storage.common.metadata.MetadataConverter.renderPartition FileBasedMetadata.this.converter.renderPartition(config)
203 997 8620 - 8655 Literal <nosymbol> "Persisted partition configuration"
203 1023 8612 - 9134 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[org.apache.hadoop.fs.Path]("Persisted partition configuration")({ val encoded: String = org.locationtech.geomesa.utils.text.StringSerialization.alphaNumericSafeString(config.name); val name: String = scala.StringContext.apply("", "", "-", "", "").s(FileBasedMetadata.UpdatePartitionPrefix, encoded, java.util.UUID.randomUUID(), FileBasedMetadata.this.converter.suffix); val file: org.apache.hadoop.fs.Path = new org.apache.hadoop.fs.Path(FileBasedMetadata.this.directory, name); org.locationtech.geomesa.utils.io.`package`.WithClose.apply[org.apache.hadoop.fs.FSDataOutputStream, Unit](FileBasedMetadata.this.fc.create(file, java.util.EnumSet.of[org.apache.hadoop.fs.CreateFlag](CREATE), org.apache.hadoop.fs.Options.CreateOpts.createParent()))(((out: org.apache.hadoop.fs.FSDataOutputStream) => { out.write(data.getBytes(java.nio.charset.StandardCharsets.UTF_8)); out.hflush(); out.hsync() }))(io.this.IsCloseable.closeableIsCloseable); org.locationtech.geomesa.fs.storage.common.utils.PathCache.register(FileBasedMetadata.this.fc, file, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$3, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$4); file })
204 998 8722 - 8733 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.name config.name
204 999 8679 - 8734 Apply org.locationtech.geomesa.utils.text.StringSerialization.alphaNumericSafeString org.locationtech.geomesa.utils.text.StringSerialization.alphaNumericSafeString(config.name)
205 1000 8754 - 8755 Literal <nosymbol> ""
205 1001 8776 - 8777 Literal <nosymbol> ""
205 1002 8784 - 8786 Literal <nosymbol> "-"
205 1003 8805 - 8806 Literal <nosymbol> ""
205 1004 8824 - 8825 Literal <nosymbol> ""
205 1005 8755 - 8776 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.UpdatePartitionPrefix FileBasedMetadata.UpdatePartitionPrefix
205 1006 8787 - 8804 Apply java.util.UUID.randomUUID java.util.UUID.randomUUID()
205 1007 8807 - 8823 Select org.locationtech.geomesa.fs.storage.common.metadata.MetadataConverter.suffix FileBasedMetadata.this.converter.suffix
205 1008 8752 - 8825 Apply scala.StringContext.s scala.StringContext.apply("", "", "-", "", "").s(FileBasedMetadata.UpdatePartitionPrefix, encoded, java.util.UUID.randomUUID(), FileBasedMetadata.this.converter.suffix)
206 1009 8852 - 8861 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.directory FileBasedMetadata.this.directory
206 1010 8843 - 8868 Apply org.apache.hadoop.fs.Path.<init> new org.apache.hadoop.fs.Path(FileBasedMetadata.this.directory, name)
207 1011 8901 - 8940 Apply java.util.EnumSet.of java.util.EnumSet.of[org.apache.hadoop.fs.CreateFlag](CREATE)
207 1012 8942 - 8965 Apply org.apache.hadoop.fs.Options.CreateOpts.createParent org.apache.hadoop.fs.Options.CreateOpts.createParent()
207 1013 8885 - 8966 Apply org.apache.hadoop.fs.FileContext.create FileBasedMetadata.this.fc.create(file, java.util.EnumSet.of[org.apache.hadoop.fs.CreateFlag](CREATE), org.apache.hadoop.fs.Options.CreateOpts.createParent())
207 1019 8968 - 8968 Select org.locationtech.geomesa.utils.io.IsCloseableImplicits.closeableIsCloseable io.this.IsCloseable.closeableIsCloseable
207 1020 8875 - 9082 ApplyToImplicitArgs org.locationtech.geomesa.utils.io.WithClose.apply org.locationtech.geomesa.utils.io.`package`.WithClose.apply[org.apache.hadoop.fs.FSDataOutputStream, Unit](FileBasedMetadata.this.fc.create(file, java.util.EnumSet.of[org.apache.hadoop.fs.CreateFlag](CREATE), org.apache.hadoop.fs.Options.CreateOpts.createParent()))(((out: org.apache.hadoop.fs.FSDataOutputStream) => { out.write(data.getBytes(java.nio.charset.StandardCharsets.UTF_8)); out.hflush(); out.hsync() }))(io.this.IsCloseable.closeableIsCloseable)
208 1014 9009 - 9031 Select java.nio.charset.StandardCharsets.UTF_8 java.nio.charset.StandardCharsets.UTF_8
208 1015 8995 - 9032 Apply java.lang.String.getBytes data.getBytes(java.nio.charset.StandardCharsets.UTF_8)
208 1016 8985 - 9033 Apply java.io.FilterOutputStream.write out.write(data.getBytes(java.nio.charset.StandardCharsets.UTF_8))
209 1017 9042 - 9054 Apply org.apache.hadoop.fs.FSDataOutputStream.hflush out.hflush()
210 1018 9063 - 9074 Apply org.apache.hadoop.fs.FSDataOutputStream.hsync out.hsync()
212 1021 9108 - 9110 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.fc FileBasedMetadata.this.fc
212 1022 9089 - 9117 Apply org.locationtech.geomesa.fs.storage.common.utils.PathCache.register org.locationtech.geomesa.fs.storage.common.utils.PathCache.register(FileBasedMetadata.this.fc, file, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$3, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$4)
223 1024 9363 - 9409 Literal <nosymbol> "Serialized compacted partition configuration"
223 1026 9355 - 9459 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[String]("Serialized compacted partition configuration")(FileBasedMetadata.this.converter.renderCompaction(config))
224 1025 9419 - 9453 Apply org.locationtech.geomesa.fs.storage.common.metadata.MetadataConverter.renderCompaction FileBasedMetadata.this.converter.renderCompaction(config)
226 1027 9472 - 9517 Literal <nosymbol> "Persisted compacted partition configuration"
226 1061 9464 - 10270 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[Unit]("Persisted compacted partition configuration")({ val file: org.apache.hadoop.fs.Path = new org.apache.hadoop.fs.Path(FileBasedMetadata.this.directory, FileBasedMetadata.CompactedPrefix.+(FileBasedMetadata.this.converter.suffix)); val flags: java.util.EnumSet[org.apache.hadoop.fs.CreateFlag] = java.util.EnumSet.of[org.apache.hadoop.fs.CreateFlag](CREATE, OVERWRITE); org.locationtech.geomesa.utils.io.`package`.WithClose.apply[org.apache.hadoop.fs.FSDataOutputStream, Unit](FileBasedMetadata.this.fc.create(file, flags, org.apache.hadoop.fs.Options.CreateOpts.createParent()))(((out: org.apache.hadoop.fs.FSDataOutputStream) => { out.write(data.getBytes(java.nio.charset.StandardCharsets.UTF_8)); out.hflush(); out.hsync() }))(io.this.IsCloseable.closeableIsCloseable); org.locationtech.geomesa.fs.storage.common.utils.PathCache.register(FileBasedMetadata.this.fc, file, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$3, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$4); val toRemove: org.apache.hadoop.fs.Path = new org.apache.hadoop.fs.Path(FileBasedMetadata.this.directory, if (FileBasedMetadata.this.converter.suffix.==(FileBasedMetadata.HoconPathSuffix)) FileBasedMetadata.CompactedJson else FileBasedMetadata.CompactedHocon); if (org.locationtech.geomesa.fs.storage.common.utils.PathCache.exists(FileBasedMetadata.this.fc, toRemove, true)) { FileBasedMetadata.this.fc.delete(toRemove, false); org.locationtech.geomesa.fs.storage.common.utils.PathCache.invalidate(FileBasedMetadata.this.fc, toRemove) } else () })
227 1028 9547 - 9556 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.directory FileBasedMetadata.this.directory
227 1029 9576 - 9592 Select org.locationtech.geomesa.fs.storage.common.metadata.MetadataConverter.suffix FileBasedMetadata.this.converter.suffix
227 1030 9558 - 9592 Apply java.lang.String.+ FileBasedMetadata.CompactedPrefix.+(FileBasedMetadata.this.converter.suffix)
227 1031 9538 - 9593 Apply org.apache.hadoop.fs.Path.<init> new org.apache.hadoop.fs.Path(FileBasedMetadata.this.directory, FileBasedMetadata.CompactedPrefix.+(FileBasedMetadata.this.converter.suffix))
228 1032 9612 - 9673 Apply java.util.EnumSet.of java.util.EnumSet.of[org.apache.hadoop.fs.CreateFlag](CREATE, OVERWRITE)
229 1033 9713 - 9736 Apply org.apache.hadoop.fs.Options.CreateOpts.createParent org.apache.hadoop.fs.Options.CreateOpts.createParent()
229 1034 9690 - 9737 Apply org.apache.hadoop.fs.FileContext.create FileBasedMetadata.this.fc.create(file, flags, org.apache.hadoop.fs.Options.CreateOpts.createParent())
229 1040 9739 - 9739 Select org.locationtech.geomesa.utils.io.IsCloseableImplicits.closeableIsCloseable io.this.IsCloseable.closeableIsCloseable
229 1041 9680 - 9853 ApplyToImplicitArgs org.locationtech.geomesa.utils.io.WithClose.apply org.locationtech.geomesa.utils.io.`package`.WithClose.apply[org.apache.hadoop.fs.FSDataOutputStream, Unit](FileBasedMetadata.this.fc.create(file, flags, org.apache.hadoop.fs.Options.CreateOpts.createParent()))(((out: org.apache.hadoop.fs.FSDataOutputStream) => { out.write(data.getBytes(java.nio.charset.StandardCharsets.UTF_8)); out.hflush(); out.hsync() }))(io.this.IsCloseable.closeableIsCloseable)
230 1035 9780 - 9802 Select java.nio.charset.StandardCharsets.UTF_8 java.nio.charset.StandardCharsets.UTF_8
230 1036 9766 - 9803 Apply java.lang.String.getBytes data.getBytes(java.nio.charset.StandardCharsets.UTF_8)
230 1037 9756 - 9804 Apply java.io.FilterOutputStream.write out.write(data.getBytes(java.nio.charset.StandardCharsets.UTF_8))
231 1038 9813 - 9825 Apply org.apache.hadoop.fs.FSDataOutputStream.hflush out.hflush()
232 1039 9834 - 9845 Apply org.apache.hadoop.fs.FSDataOutputStream.hsync out.hsync()
234 1042 9879 - 9881 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.fc FileBasedMetadata.this.fc
234 1043 9860 - 9888 Apply org.locationtech.geomesa.fs.storage.common.utils.PathCache.register org.locationtech.geomesa.fs.storage.common.utils.PathCache.register(FileBasedMetadata.this.fc, file, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$3, org.locationtech.geomesa.fs.storage.common.utils.PathCache.register$default$4)
237 1044 10025 - 10034 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.directory FileBasedMetadata.this.directory
237 1045 10060 - 10075 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.HoconPathSuffix FileBasedMetadata.HoconPathSuffix
237 1046 10040 - 10075 Apply java.lang.Object.== FileBasedMetadata.this.converter.suffix.==(FileBasedMetadata.HoconPathSuffix)
237 1047 10079 - 10092 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedJson FileBasedMetadata.CompactedJson
237 1048 10079 - 10092 Block org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedJson FileBasedMetadata.CompactedJson
237 1049 10102 - 10116 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedHocon FileBasedMetadata.CompactedHocon
237 1050 10102 - 10116 Block org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedHocon FileBasedMetadata.CompactedHocon
237 1051 10016 - 10119 Apply org.apache.hadoop.fs.Path.<init> new org.apache.hadoop.fs.Path(FileBasedMetadata.this.directory, if (FileBasedMetadata.this.converter.suffix.==(FileBasedMetadata.HoconPathSuffix)) FileBasedMetadata.CompactedJson else FileBasedMetadata.CompactedHocon)
238 1052 10147 - 10149 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.fc FileBasedMetadata.this.fc
238 1053 10170 - 10174 Literal <nosymbol> true
238 1054 10130 - 10175 Apply org.locationtech.geomesa.fs.storage.common.utils.PathCache.exists org.locationtech.geomesa.fs.storage.common.utils.PathCache.exists(FileBasedMetadata.this.fc, toRemove, true)
238 1058 10177 - 10264 Block <nosymbol> { FileBasedMetadata.this.fc.delete(toRemove, false); org.locationtech.geomesa.fs.storage.common.utils.PathCache.invalidate(FileBasedMetadata.this.fc, toRemove) }
238 1059 10126 - 10126 Literal <nosymbol> ()
238 1060 10126 - 10126 Block <nosymbol> ()
239 1055 10187 - 10213 Apply org.apache.hadoop.fs.FileContext.delete FileBasedMetadata.this.fc.delete(toRemove, false)
240 1056 10243 - 10245 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.fc FileBasedMetadata.this.fc
240 1057 10222 - 10256 Apply org.locationtech.geomesa.fs.storage.common.utils.PathCache.invalidate org.locationtech.geomesa.fs.storage.common.utils.PathCache.invalidate(FileBasedMetadata.this.fc, toRemove)
253 1062 10570 - 10617 Apply java.util.concurrent.ConcurrentHashMap.<init> new java.util.concurrent.ConcurrentHashMap[String,org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles]()
256 1063 10674 - 10697 Literal <nosymbol> "Listed metadata files"
256 1075 10666 - 11156 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[Int]("Listed metadata files")({ val pool: org.locationtech.geomesa.utils.concurrent.CachedThreadPool = new org.locationtech.geomesa.utils.concurrent.CachedThreadPool(threads); val phaser: java.util.concurrent.Phaser = new java.util.concurrent.Phaser(2); pool.submit(new FileBasedMetadata.this.DirectoryWorker(pool, phaser, FileBasedMetadata.this.fc.listStatus(FileBasedMetadata.this.directory), result)); try { phaser.awaitAdvanceInterruptibly(phaser.arrive()) } finally pool.shutdown() })
257 1064 10718 - 10747 Apply org.locationtech.geomesa.utils.concurrent.CachedThreadPool.<init> new org.locationtech.geomesa.utils.concurrent.CachedThreadPool(threads)
259 1065 10823 - 10836 Apply java.util.concurrent.Phaser.<init> new java.util.concurrent.Phaser(2)
260 1066 10961 - 10970 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.directory FileBasedMetadata.this.directory
260 1067 10947 - 10971 Apply org.apache.hadoop.fs.FileContext.listStatus FileBasedMetadata.this.fc.listStatus(FileBasedMetadata.this.directory)
260 1068 10913 - 10980 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.<init> new FileBasedMetadata.this.DirectoryWorker(pool, phaser, FileBasedMetadata.this.fc.listStatus(FileBasedMetadata.this.directory), result)
260 1069 10901 - 10981 Apply java.util.concurrent.AbstractExecutorService.submit pool.submit(new FileBasedMetadata.this.DirectoryWorker(pool, phaser, FileBasedMetadata.this.fc.listStatus(FileBasedMetadata.this.directory), result))
263 1070 11084 - 11099 Apply java.util.concurrent.Phaser.arrive phaser.arrive()
263 1071 11051 - 11100 Apply java.util.concurrent.Phaser.awaitAdvanceInterruptibly phaser.awaitAdvanceInterruptibly(phaser.arrive())
263 1072 11051 - 11100 Block java.util.concurrent.Phaser.awaitAdvanceInterruptibly phaser.awaitAdvanceInterruptibly(phaser.arrive())
265 1073 11127 - 11142 Apply org.locationtech.geomesa.utils.concurrent.CachedThreadPool.shutdown pool.shutdown()
265 1074 11127 - 11142 Block org.locationtech.geomesa.utils.concurrent.CachedThreadPool.shutdown pool.shutdown()
280 1076 11464 - 11475 Apply scala.Int.< threads.<(2)
281 1077 11508 - 11527 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.readPartitionConfig FileBasedMetadata.this.readPartitionConfig(file)
281 1078 11508 - 11527 ApplyImplicitView scala.Option.option2Iterable scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](FileBasedMetadata.this.readPartitionConfig(file))
281 1079 11507 - 11507 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]
281 1080 11485 - 11528 ApplyToImplicitArgs scala.collection.TraversableLike.flatMap files.unparsed.flatMap[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig, Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]({ ((file: org.apache.hadoop.fs.Path) => scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](FileBasedMetadata.this.readPartitionConfig(file))) })(collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig])
281 1081 11485 - 11528 Block scala.collection.TraversableLike.flatMap files.unparsed.flatMap[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig, Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]({ ((file: org.apache.hadoop.fs.Path) => scala.this.Option.option2Iterable[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](FileBasedMetadata.this.readPartitionConfig(file))) })(collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig])
282 1097 11540 - 12007 Block <nosymbol> { val ec: org.locationtech.geomesa.utils.concurrent.CachedThreadPool = new org.locationtech.geomesa.utils.concurrent.CachedThreadPool(threads); try { val results: scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]] = scala.collection.Seq.newBuilder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]; def readOne(p: org.apache.hadoop.fs.Path): Unit = FileBasedMetadata.this.readPartitionConfig(p).foreach[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](((c: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => results.synchronized[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](results.+=(c)))); files.unparsed.toList.map[java.util.concurrent.Future[_], List[java.util.concurrent.Future[_]]](((p: org.apache.hadoop.fs.Path) => ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = readOne(p) }; new $anon() })))(immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]).foreach[Any](((x$4: java.util.concurrent.Future[_]) => x$4.get())); results.result() } finally ec.shutdown() }
283 1082 11557 - 11586 Apply org.locationtech.geomesa.utils.concurrent.CachedThreadPool.<init> new org.locationtech.geomesa.utils.concurrent.CachedThreadPool(threads)
284 1094 11607 - 11953 Block <nosymbol> { val results: scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]] = scala.collection.Seq.newBuilder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]; def readOne(p: org.apache.hadoop.fs.Path): Unit = FileBasedMetadata.this.readPartitionConfig(p).foreach[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](((c: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => results.synchronized[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](results.+=(c)))); files.unparsed.toList.map[java.util.concurrent.Future[_], List[java.util.concurrent.Future[_]]](((p: org.apache.hadoop.fs.Path) => ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = readOne(p) }; new $anon() })))(immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]).foreach[Any](((x$4: java.util.concurrent.Future[_]) => x$4.get())); results.result() }
285 1083 11621 - 11652 TypeApply scala.collection.Seq.newBuilder scala.collection.Seq.newBuilder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]
287 1086 11702 - 11798 Apply scala.Option.foreach FileBasedMetadata.this.readPartitionConfig(p).foreach[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](((c: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => results.synchronized[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](results.+=(c))))
288 1084 11773 - 11785 Apply scala.collection.mutable.Builder.+= results.+=(c)
288 1085 11752 - 11786 Apply java.lang.Object.synchronized results.synchronized[scala.collection.mutable.Builder[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig,Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]](results.+=(c))
291 1087 11902 - 11912 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.readOne readOne(p)
291 1088 11858 - 11861 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
291 1089 11848 - 11914 Apply java.util.concurrent.AbstractExecutorService.submit ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = readOne(p) }; new $anon() })
291 1090 11842 - 11842 TypeApply scala.collection.immutable.List.canBuildFrom immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]
291 1091 11924 - 11929 Apply java.util.concurrent.Future.get x$4.get()
291 1092 11817 - 11930 Apply scala.collection.immutable.List.foreach files.unparsed.toList.map[java.util.concurrent.Future[_], List[java.util.concurrent.Future[_]]](((p: org.apache.hadoop.fs.Path) => ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = readOne(p) }; new $anon() })))(immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]).foreach[Any](((x$4: java.util.concurrent.Future[_]) => x$4.get()))
292 1093 11939 - 11953 Apply scala.collection.mutable.Builder.result results.result()
294 1095 11980 - 11993 Apply org.locationtech.geomesa.utils.concurrent.CachedThreadPool.shutdown ec.shutdown()
294 1096 11980 - 11993 Block org.locationtech.geomesa.utils.concurrent.CachedThreadPool.shutdown ec.shutdown()
297 1098 12045 - 12057 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.config files.config
297 1099 12042 - 12042 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]
297 1100 12034 - 12057 ApplyToImplicitArgs scala.collection.TraversableLike.++ updates.++[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig, Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]](files.config)(collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig])
297 1101 12066 - 12082 Select scala.collection.TraversableOnce.nonEmpty x$5.files.nonEmpty
297 1102 12012 - 12083 Apply scala.Option.filter metadata.this.`package`.mergePartitionConfigs(updates.++[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig, Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]](files.config)(collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig])).filter(((x$5: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => x$5.files.nonEmpty))
307 1118 12283 - 12656 Block <nosymbol> { val config: com.typesafe.config.Config = FileBasedMetadata.this.profile[com.typesafe.config.Config]("Loaded partition configuration")(org.locationtech.geomesa.utils.io.`package`.WithClose.apply[java.io.InputStreamReader, com.typesafe.config.Config](new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8))(((in: java.io.InputStreamReader) => com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))))(io.this.IsCloseable.closeableIsCloseable)); FileBasedMetadata.this.profile[Some[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]("Parsed partition configuration")(scala.Some.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](FileBasedMetadata.this.converter.parsePartition(config))) }
308 1103 12304 - 12336 Literal <nosymbol> "Loaded partition configuration"
308 1113 12296 - 12551 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[com.typesafe.config.Config]("Loaded partition configuration")(org.locationtech.geomesa.utils.io.`package`.WithClose.apply[java.io.InputStreamReader, com.typesafe.config.Config](new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8))(((in: java.io.InputStreamReader) => com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))))(io.this.IsCloseable.closeableIsCloseable))
309 1104 12380 - 12393 Apply org.apache.hadoop.fs.FileContext.open FileBasedMetadata.this.fc.open(file)
309 1105 12395 - 12417 Select java.nio.charset.StandardCharsets.UTF_8 java.nio.charset.StandardCharsets.UTF_8
309 1106 12358 - 12418 Apply java.io.InputStreamReader.<init> new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8)
309 1111 12420 - 12420 Select org.locationtech.geomesa.utils.io.IsCloseableImplicits.closeableIsCloseable io.this.IsCloseable.closeableIsCloseable
309 1112 12348 - 12543 ApplyToImplicitArgs org.locationtech.geomesa.utils.io.WithClose.apply org.locationtech.geomesa.utils.io.`package`.WithClose.apply[java.io.InputStreamReader, com.typesafe.config.Config](new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8))(((in: java.io.InputStreamReader) => com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))))(io.this.IsCloseable.closeableIsCloseable)
310 1107 12518 - 12530 Apply org.apache.hadoop.fs.Path.getName file.getName()
310 1108 12508 - 12531 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.getSyntax FileBasedMetadata.getSyntax(file.getName())
310 1109 12468 - 12532 Apply com.typesafe.config.ConfigParseOptions.setSyntax com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName()))
310 1110 12438 - 12533 Apply com.typesafe.config.ConfigFactory.parseReader com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))
313 1114 12566 - 12598 Literal <nosymbol> "Parsed partition configuration"
313 1117 12558 - 12656 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[Some[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]("Parsed partition configuration")(scala.Some.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](FileBasedMetadata.this.converter.parsePartition(config)))
314 1115 12615 - 12647 Apply org.locationtech.geomesa.fs.storage.common.metadata.MetadataConverter.parsePartition FileBasedMetadata.this.converter.parsePartition(config)
314 1116 12610 - 12648 Apply scala.Some.apply scala.Some.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](FileBasedMetadata.this.converter.parsePartition(config))
317 1119 12754 - 12758 Select scala.None scala.None
317 1120 12694 - 12758 Block <nosymbol> { (if (FileBasedMetadata.this.logger.underlying.isErrorEnabled()) FileBasedMetadata.this.logger.underlying.error(scala.StringContext.apply("Error reading config at path ", ":").s(file), e) else (): Unit); scala.None }
328 1135 12983 - 13371 Block <nosymbol> { val config: com.typesafe.config.Config = FileBasedMetadata.this.profile[com.typesafe.config.Config]("Loaded compacted partition configuration")(org.locationtech.geomesa.utils.io.`package`.WithClose.apply[java.io.InputStreamReader, com.typesafe.config.Config](new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8))(((in: java.io.InputStreamReader) => com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))))(io.this.IsCloseable.closeableIsCloseable)); FileBasedMetadata.this.profile[Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]("Parsed compacted partition configuration")(FileBasedMetadata.this.converter.parseCompaction(config)) }
329 1121 13004 - 13046 Literal <nosymbol> "Loaded compacted partition configuration"
329 1131 12996 - 13261 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[com.typesafe.config.Config]("Loaded compacted partition configuration")(org.locationtech.geomesa.utils.io.`package`.WithClose.apply[java.io.InputStreamReader, com.typesafe.config.Config](new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8))(((in: java.io.InputStreamReader) => com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))))(io.this.IsCloseable.closeableIsCloseable))
330 1122 13090 - 13103 Apply org.apache.hadoop.fs.FileContext.open FileBasedMetadata.this.fc.open(file)
330 1123 13105 - 13127 Select java.nio.charset.StandardCharsets.UTF_8 java.nio.charset.StandardCharsets.UTF_8
330 1124 13068 - 13128 Apply java.io.InputStreamReader.<init> new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8)
330 1129 13130 - 13130 Select org.locationtech.geomesa.utils.io.IsCloseableImplicits.closeableIsCloseable io.this.IsCloseable.closeableIsCloseable
330 1130 13058 - 13253 ApplyToImplicitArgs org.locationtech.geomesa.utils.io.WithClose.apply org.locationtech.geomesa.utils.io.`package`.WithClose.apply[java.io.InputStreamReader, com.typesafe.config.Config](new java.io.InputStreamReader(FileBasedMetadata.this.fc.open(file), java.nio.charset.StandardCharsets.UTF_8))(((in: java.io.InputStreamReader) => com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))))(io.this.IsCloseable.closeableIsCloseable)
331 1125 13228 - 13240 Apply org.apache.hadoop.fs.Path.getName file.getName()
331 1126 13218 - 13241 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.getSyntax FileBasedMetadata.getSyntax(file.getName())
331 1127 13178 - 13242 Apply com.typesafe.config.ConfigParseOptions.setSyntax com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName()))
331 1128 13148 - 13243 Apply com.typesafe.config.ConfigFactory.parseReader com.typesafe.config.ConfigFactory.parseReader(in, com.typesafe.config.ConfigParseOptions.defaults().setSyntax(FileBasedMetadata.getSyntax(file.getName())))
334 1132 13276 - 13318 Literal <nosymbol> "Parsed compacted partition configuration"
334 1134 13268 - 13371 Apply org.locationtech.geomesa.utils.stats.MethodProfiling.profile FileBasedMetadata.this.profile[Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]]("Parsed compacted partition configuration")(FileBasedMetadata.this.converter.parseCompaction(config))
335 1133 13330 - 13363 Apply org.locationtech.geomesa.fs.storage.common.metadata.MetadataConverter.parseCompaction FileBasedMetadata.this.converter.parseCompaction(config)
338 1136 13469 - 13478 TypeApply scala.collection.generic.GenericCompanion.empty scala.collection.Seq.empty[Nothing]
338 1137 13409 - 13478 Block <nosymbol> { (if (FileBasedMetadata.this.logger.underlying.isErrorEnabled()) FileBasedMetadata.this.logger.underlying.error(scala.StringContext.apply("Error reading config at path ", ":").s(file), e) else (): Unit); scala.collection.Seq.empty[Nothing] }
349 1138 13689 - 13700 Apply scala.Int.< threads.<(2)
350 1139 13724 - 13743 Apply org.apache.hadoop.fs.FileContext.delete FileBasedMetadata.this.fc.delete(x$6, false)
350 1140 13710 - 13744 Apply scala.collection.IterableLike.foreach paths.foreach[Boolean](((x$6: org.apache.hadoop.fs.Path) => FileBasedMetadata.this.fc.delete(x$6, false)))
350 1141 13710 - 13744 Block scala.collection.IterableLike.foreach paths.foreach[Boolean](((x$6: org.apache.hadoop.fs.Path) => FileBasedMetadata.this.fc.delete(x$6, false)))
351 1153 13756 - 13990 Block <nosymbol> { val ec: org.locationtech.geomesa.utils.concurrent.CachedThreadPool = new org.locationtech.geomesa.utils.concurrent.CachedThreadPool(threads); try { paths.toList.map[java.util.concurrent.Future[_], List[java.util.concurrent.Future[_]]](((p: org.apache.hadoop.fs.Path) => ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = { FileBasedMetadata.this.fc.delete(p, false); () } }; new $anon() })))(immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]).foreach[Any](((x$7: java.util.concurrent.Future[_]) => x$7.get())) } finally ec.shutdown() }
352 1142 13773 - 13802 Apply org.locationtech.geomesa.utils.concurrent.CachedThreadPool.<init> new org.locationtech.geomesa.utils.concurrent.CachedThreadPool(threads)
354 1143 13899 - 13918 Apply org.apache.hadoop.fs.FileContext.delete FileBasedMetadata.this.fc.delete(p, false)
354 1144 13908 - 13908 Literal <nosymbol> ()
354 1145 13855 - 13858 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
354 1146 13845 - 13920 Apply java.util.concurrent.AbstractExecutorService.submit ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = { FileBasedMetadata.this.fc.delete(p, false); () } }; new $anon() })
354 1147 13839 - 13839 TypeApply scala.collection.immutable.List.canBuildFrom immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]
354 1148 13930 - 13935 Apply java.util.concurrent.Future.get x$7.get()
354 1149 13823 - 13936 Apply scala.collection.immutable.List.foreach paths.toList.map[java.util.concurrent.Future[_], List[java.util.concurrent.Future[_]]](((p: org.apache.hadoop.fs.Path) => ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = { FileBasedMetadata.this.fc.delete(p, false); () } }; new $anon() })))(immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]).foreach[Any](((x$7: java.util.concurrent.Future[_]) => x$7.get()))
354 1150 13823 - 13936 Block scala.collection.immutable.List.foreach paths.toList.map[java.util.concurrent.Future[_], List[java.util.concurrent.Future[_]]](((p: org.apache.hadoop.fs.Path) => ec.submit({ final class $anon extends Object with Runnable { def <init>(): <$anon: Runnable> = { $anon.super.<init>(); () }; override def run(): Unit = { FileBasedMetadata.this.fc.delete(p, false); () } }; new $anon() })))(immutable.this.List.canBuildFrom[java.util.concurrent.Future[_]]).foreach[Any](((x$7: java.util.concurrent.Future[_]) => x$7.get()))
356 1151 13963 - 13976 Apply org.locationtech.geomesa.utils.concurrent.CachedThreadPool.shutdown ec.shutdown()
356 1152 13963 - 13976 Block org.locationtech.geomesa.utils.concurrent.CachedThreadPool.shutdown ec.shutdown()
369 1226 14265 - 15832 Block <nosymbol> { var i: Int = DirectoryWorker.this.phaser.getRegisteredParties().+(1); val iter: org.apache.hadoop.fs.RemoteIterator[org.apache.hadoop.fs.FileStatus] = DirectoryWorker.this.listDirectory; while$1(){ if (iter.hasNext().&&(i.<(org.locationtech.geomesa.utils.concurrent.`package`.PhaserUtils.MaxParties))) { { val status: org.apache.hadoop.fs.FileStatus = iter.next(); val path: org.apache.hadoop.fs.Path = status.getPath(); <stable> <accessor> lazy val name: String = path.getName(); if (status.isDirectory()) { i = i.+(1); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), FileBasedMetadata.this.fc.listStatus(path), DirectoryWorker.this.result)) } else if (name.startsWith(FileBasedMetadata.UpdatePartitionPrefix)) { val encoded: String = name.substring(8, name.length().-(42)); val partition: String = org.locationtech.geomesa.utils.text.StringSerialization.decodeAlphaNumericSafeString(encoded); DirectoryWorker.this.result.merge(partition, { <artifact> val x$1: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path); <artifact> val x$2: Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$1; <artifact> val x$3: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$2; FileBasedMetadata.PartitionFiles.apply(x$2, x$3, x$1) }, FileBasedMetadata.addFiles) } else if (name.==(FileBasedMetadata.CompactedHocon).||(name.==(FileBasedMetadata.CompactedJson))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else if (name.startsWith(FileBasedMetadata.UpdateFilePrefix).&&(name.endsWith(FileBasedMetadata.JsonPathSuffix))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else () }; while$1() } else () }; if (iter.hasNext()) { DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), iter, DirectoryWorker.this.result)); () } else () }
370 1154 14273 - 14304 Apply scala.Int.+ DirectoryWorker.this.phaser.getRegisteredParties().+(1)
371 1155 14324 - 14337 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.listDirectory DirectoryWorker.this.listDirectory
372 1156 14373 - 14395 Select org.locationtech.geomesa.utils.concurrent.PhaserUtils.MaxParties org.locationtech.geomesa.utils.concurrent.`package`.PhaserUtils.MaxParties
372 1157 14369 - 14395 Apply scala.Int.< i.<(org.locationtech.geomesa.utils.concurrent.`package`.PhaserUtils.MaxParties)
372 1158 14353 - 14395 Apply scala.Boolean.&& iter.hasNext().&&(i.<(org.locationtech.geomesa.utils.concurrent.`package`.PhaserUtils.MaxParties))
372 1210 14397 - 14397 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.while$1 while$1()
372 1211 14397 - 15712 Block <nosymbol> { { val status: org.apache.hadoop.fs.FileStatus = iter.next(); val path: org.apache.hadoop.fs.Path = status.getPath(); <stable> <accessor> lazy val name: String = path.getName(); if (status.isDirectory()) { i = i.+(1); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), FileBasedMetadata.this.fc.listStatus(path), DirectoryWorker.this.result)) } else if (name.startsWith(FileBasedMetadata.UpdatePartitionPrefix)) { val encoded: String = name.substring(8, name.length().-(42)); val partition: String = org.locationtech.geomesa.utils.text.StringSerialization.decodeAlphaNumericSafeString(encoded); DirectoryWorker.this.result.merge(partition, { <artifact> val x$1: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path); <artifact> val x$2: Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$1; <artifact> val x$3: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$2; FileBasedMetadata.PartitionFiles.apply(x$2, x$3, x$1) }, FileBasedMetadata.addFiles) } else if (name.==(FileBasedMetadata.CompactedHocon).||(name.==(FileBasedMetadata.CompactedJson))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else if (name.startsWith(FileBasedMetadata.UpdateFilePrefix).&&(name.endsWith(FileBasedMetadata.JsonPathSuffix))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else () }; while$1() }
372 1212 14346 - 14346 Literal <nosymbol> ()
372 1213 14346 - 14346 Block <nosymbol> ()
373 1159 14422 - 14431 Apply org.apache.hadoop.fs.RemoteIterator.next iter.next()
374 1160 14453 - 14467 Apply org.apache.hadoop.fs.FileStatus.getPath status.getPath()
376 1161 14521 - 14539 Apply org.apache.hadoop.fs.FileStatus.isDirectory status.isDirectory()
376 1171 14541 - 14769 Block <nosymbol> { i = i.+(1); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), FileBasedMetadata.this.fc.listStatus(path), DirectoryWorker.this.result)) }
377 1162 14555 - 14561 Apply scala.Int.+ i.+(1)
379 1163 14701 - 14703 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.es DirectoryWorker.this.es
379 1164 14716 - 14722 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.phaser DirectoryWorker.this.phaser
379 1165 14724 - 14725 Literal <nosymbol> 1
379 1166 14705 - 14726 Apply java.util.concurrent.Phaser.<init> new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1)
379 1167 14728 - 14747 Apply org.apache.hadoop.fs.FileContext.listStatus FileBasedMetadata.this.fc.listStatus(path)
379 1168 14749 - 14755 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.result DirectoryWorker.this.result
379 1169 14681 - 14756 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.<init> new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), FileBasedMetadata.this.fc.listStatus(path), DirectoryWorker.this.result)
379 1170 14671 - 14757 Apply java.util.concurrent.ExecutorService.submit DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), FileBasedMetadata.this.fc.listStatus(path), DirectoryWorker.this.result))
380 1172 14795 - 14816 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.UpdatePartitionPrefix FileBasedMetadata.UpdatePartitionPrefix
380 1173 14779 - 14817 Apply java.lang.String.startsWith name.startsWith(FileBasedMetadata.UpdatePartitionPrefix)
380 1182 14819 - 15167 Block <nosymbol> { val encoded: String = name.substring(8, name.length().-(42)); val partition: String = org.locationtech.geomesa.utils.text.StringSerialization.decodeAlphaNumericSafeString(encoded); DirectoryWorker.this.result.merge(partition, { <artifact> val x$1: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path); <artifact> val x$2: Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$1; <artifact> val x$3: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$2; FileBasedMetadata.PartitionFiles.apply(x$2, x$3, x$1) }, FileBasedMetadata.addFiles) }
380 1209 14775 - 15702 If <nosymbol> if (name.startsWith(FileBasedMetadata.UpdatePartitionPrefix)) { val encoded: String = name.substring(8, name.length().-(42)); val partition: String = org.locationtech.geomesa.utils.text.StringSerialization.decodeAlphaNumericSafeString(encoded); DirectoryWorker.this.result.merge(partition, { <artifact> val x$1: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path); <artifact> val x$2: Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$1; <artifact> val x$3: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$2; FileBasedMetadata.PartitionFiles.apply(x$2, x$3, x$1) }, FileBasedMetadata.addFiles) } else if (name.==(FileBasedMetadata.CompactedHocon).||(name.==(FileBasedMetadata.CompactedJson))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else if (name.startsWith(FileBasedMetadata.UpdateFilePrefix).&&(name.endsWith(FileBasedMetadata.JsonPathSuffix))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else ()
382 1174 14934 - 14935 Literal <nosymbol> 8
382 1175 14937 - 14953 Apply scala.Int.- name.length().-(42)
382 1176 14919 - 14954 Apply java.lang.String.substring name.substring(8, name.length().-(42))
383 1177 15014 - 15071 Apply org.locationtech.geomesa.utils.text.StringSerialization.decodeAlphaNumericSafeString org.locationtech.geomesa.utils.text.StringSerialization.decodeAlphaNumericSafeString(encoded)
384 1178 15134 - 15143 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path)
384 1179 15108 - 15144 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.PartitionFiles.apply(x$2, x$3, x$1)
384 1180 15146 - 15154 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.addFiles FileBasedMetadata.addFiles
384 1181 15084 - 15155 Apply java.util.concurrent.ConcurrentHashMap.merge DirectoryWorker.this.result.merge(partition, { <artifact> val x$1: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = scala.collection.Seq.apply[org.apache.hadoop.fs.Path](path); <artifact> val x$2: Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$1; <artifact> val x$3: Seq[org.apache.hadoop.fs.Path] @scala.reflect.internal.annotations.uncheckedBounds = FileBasedMetadata.PartitionFiles.apply$default$2; FileBasedMetadata.PartitionFiles.apply(x$2, x$3, x$1) }, FileBasedMetadata.addFiles)
385 1183 15185 - 15199 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedHocon FileBasedMetadata.CompactedHocon
385 1184 15211 - 15224 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedJson FileBasedMetadata.CompactedJson
385 1185 15203 - 15224 Apply java.lang.Object.== name.==(FileBasedMetadata.CompactedJson)
385 1186 15177 - 15224 Apply scala.Boolean.|| name.==(FileBasedMetadata.CompactedHocon).||(name.==(FileBasedMetadata.CompactedJson))
385 1193 15226 - 15387 Block <nosymbol> { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) }
385 1208 15173 - 15702 If <nosymbol> if (name.==(FileBasedMetadata.CompactedHocon).||(name.==(FileBasedMetadata.CompactedJson))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else if (name.startsWith(FileBasedMetadata.UpdateFilePrefix).&&(name.endsWith(FileBasedMetadata.JsonPathSuffix))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else ()
386 1187 15240 - 15246 Apply scala.Int.+ i.+(1)
387 1188 15259 - 15276 Apply java.util.concurrent.Phaser.register DirectoryWorker.this.phaser.register()
388 1189 15353 - 15359 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.phaser DirectoryWorker.this.phaser
388 1190 15367 - 15373 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.result DirectoryWorker.this.result
388 1191 15333 - 15374 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedParser.<init> new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)
388 1192 15323 - 15375 Apply java.util.concurrent.ExecutorService.submit DirectoryWorker.this.es.submit(new FileBasedMetadata.this.CompactedParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result))
389 1194 15413 - 15429 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.UpdateFilePrefix FileBasedMetadata.UpdateFilePrefix
389 1195 15448 - 15462 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.JsonPathSuffix FileBasedMetadata.JsonPathSuffix
389 1196 15434 - 15463 Apply java.lang.String.endsWith name.endsWith(FileBasedMetadata.JsonPathSuffix)
389 1197 15397 - 15463 Apply scala.Boolean.&& name.startsWith(FileBasedMetadata.UpdateFilePrefix).&&(name.endsWith(FileBasedMetadata.JsonPathSuffix))
389 1204 15465 - 15702 Block <nosymbol> { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) }
389 1205 15393 - 15393 Literal <nosymbol> ()
389 1206 15393 - 15393 Block <nosymbol> ()
389 1207 15393 - 15702 If <nosymbol> if (name.startsWith(FileBasedMetadata.UpdateFilePrefix).&&(name.endsWith(FileBasedMetadata.JsonPathSuffix))) { i = i.+(1); DirectoryWorker.this.phaser.register(); DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)) } else ()
391 1198 15558 - 15564 Apply scala.Int.+ i.+(1)
392 1199 15577 - 15594 Apply java.util.concurrent.Phaser.register DirectoryWorker.this.phaser.register()
393 1200 15668 - 15674 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.phaser DirectoryWorker.this.phaser
393 1201 15682 - 15688 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.result DirectoryWorker.this.result
393 1202 15651 - 15689 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.UpdateParser.<init> new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result)
393 1203 15641 - 15690 Apply java.util.concurrent.ExecutorService.submit DirectoryWorker.this.es.submit(new FileBasedMetadata.this.UpdateParser(DirectoryWorker.this.phaser, path, DirectoryWorker.this.result))
396 1214 15725 - 15737 Apply org.apache.hadoop.fs.RemoteIterator.hasNext iter.hasNext()
396 1224 15721 - 15721 Literal <nosymbol> ()
396 1225 15721 - 15721 Block <nosymbol> ()
397 1215 15781 - 15783 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.es DirectoryWorker.this.es
397 1216 15796 - 15802 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.phaser DirectoryWorker.this.phaser
397 1217 15804 - 15805 Literal <nosymbol> 1
397 1218 15785 - 15806 Apply java.util.concurrent.Phaser.<init> new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1)
397 1219 15814 - 15820 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.result DirectoryWorker.this.result
397 1220 15761 - 15821 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.DirectoryWorker.<init> new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), iter, DirectoryWorker.this.result)
397 1221 15751 - 15822 Apply java.util.concurrent.ExecutorService.submit DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), iter, DirectoryWorker.this.result))
397 1222 15760 - 15760 Literal <nosymbol> ()
397 1223 15751 - 15822 Block <nosymbol> { DirectoryWorker.this.es.submit(new FileBasedMetadata.this.DirectoryWorker(DirectoryWorker.this.es, new java.util.concurrent.Phaser(DirectoryWorker.this.phaser, 1), iter, DirectoryWorker.this.result)); () }
400 1227 15887 - 15889 Literal <nosymbol> ()
400 1228 15887 - 15889 Block <nosymbol> ()
401 1229 15966 - 16019 Typed <nosymbol> (if (FileBasedMetadata.this.logger.underlying.isErrorEnabled()) FileBasedMetadata.this.logger.underlying.error("Error scanning metadata directory:", e) else (): Unit)
403 1230 16046 - 16061 Apply java.util.concurrent.Phaser.arrive DirectoryWorker.this.phaser.arrive()
403 1231 16059 - 16059 Literal <nosymbol> ()
403 1232 16046 - 16061 Block <nosymbol> { DirectoryWorker.this.phaser.arrive(); () }
413 1233 16326 - 16330 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.CompactedParser.path CompactedParser.this.path
413 1239 16306 - 16527 Apply scala.collection.IterableLike.foreach FileBasedMetadata.this.readCompactedConfig(CompactedParser.this.path).foreach[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](((config: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => CompactedParser.this.result.merge(config.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)))
413 1240 16306 - 16527 Block scala.collection.IterableLike.foreach FileBasedMetadata.this.readCompactedConfig(CompactedParser.this.path).foreach[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](((config: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => CompactedParser.this.result.merge(config.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)))
415 1234 16457 - 16468 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.name config.name
415 1235 16494 - 16505 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config)
415 1236 16470 - 16506 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3)
415 1237 16508 - 16516 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.addFiles FileBasedMetadata.addFiles
415 1238 16444 - 16517 Apply java.util.concurrent.ConcurrentHashMap.merge CompactedParser.this.result.merge(config.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), FileBasedMetadata.PartitionFiles.apply$default$2, FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)
418 1241 16582 - 16584 Literal <nosymbol> ()
418 1242 16582 - 16584 Block <nosymbol> ()
419 1243 16652 - 16710 Typed <nosymbol> (if (FileBasedMetadata.this.logger.underlying.isErrorEnabled()) FileBasedMetadata.this.logger.underlying.error("Error reading compacted metadata entry:", e) else (): Unit)
421 1244 16737 - 16752 Apply java.util.concurrent.Phaser.arrive CompactedParser.this.phaser.arrive()
421 1245 16750 - 16750 Literal <nosymbol> ()
421 1246 16737 - 16752 Block <nosymbol> { CompactedParser.this.phaser.arrive(); () }
431 1247 17014 - 17018 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.UpdateParser.path UpdateParser.this.path
431 1255 16994 - 17153 Apply scala.Option.foreach FileBasedMetadata.this.readPartitionConfig(UpdateParser.this.path).foreach[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](((config: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => UpdateParser.this.result.merge(config.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](UpdateParser.this.path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)))
431 1256 16994 - 17153 Block scala.Option.foreach FileBasedMetadata.this.readPartitionConfig(UpdateParser.this.path).foreach[org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles](((config: org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig) => UpdateParser.this.result.merge(config.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](UpdateParser.this.path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)))
432 1248 17063 - 17074 Select org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig.name config.name
432 1249 17100 - 17111 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config)
432 1250 17126 - 17130 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.UpdateParser.path UpdateParser.this.path
432 1251 17122 - 17131 Apply scala.collection.generic.GenericCompanion.apply scala.collection.Seq.apply[org.apache.hadoop.fs.Path](UpdateParser.this.path)
432 1252 17076 - 17132 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](UpdateParser.this.path), FileBasedMetadata.PartitionFiles.apply$default$3)
432 1253 17134 - 17142 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.addFiles FileBasedMetadata.addFiles
432 1254 17050 - 17143 Apply java.util.concurrent.ConcurrentHashMap.merge UpdateParser.this.result.merge(config.name, FileBasedMetadata.PartitionFiles.apply(scala.collection.Seq.apply[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig](config), scala.collection.Seq.apply[org.apache.hadoop.fs.Path](UpdateParser.this.path), FileBasedMetadata.PartitionFiles.apply$default$3), FileBasedMetadata.addFiles)
435 1257 17208 - 17210 Literal <nosymbol> ()
435 1258 17208 - 17210 Block <nosymbol> ()
436 1259 17278 - 17333 Typed <nosymbol> (if (FileBasedMetadata.this.logger.underlying.isErrorEnabled()) FileBasedMetadata.this.logger.underlying.error("Error reading metadata update entry:", e) else (): Unit)
438 1260 17360 - 17375 Apply java.util.concurrent.Phaser.arrive UpdateParser.this.phaser.arrive()
438 1261 17373 - 17373 Literal <nosymbol> ()
438 1262 17360 - 17375 Block <nosymbol> { UpdateParser.this.phaser.arrive(); () }
446 1263 17481 - 17487 Literal <nosymbol> "file"
447 1264 17538 - 17550 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.MetadataType FileBasedMetadata.this.MetadataType
447 1265 17556 - 17572 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.Config.RenderKey FileBasedMetadata.this.Config.RenderKey
447 1266 17576 - 17596 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.Config.RenderCompact FileBasedMetadata.this.Config.RenderCompact
447 1267 17556 - 17596 Apply scala.Predef.ArrowAssoc.-> scala.Predef.ArrowAssoc[String](FileBasedMetadata.this.Config.RenderKey).->[String](FileBasedMetadata.this.Config.RenderCompact)
447 1268 17552 - 17597 Apply scala.collection.generic.GenMapFactory.apply scala.Predef.Map.apply[String, String](scala.Predef.ArrowAssoc[String](FileBasedMetadata.this.Config.RenderKey).->[String](FileBasedMetadata.this.Config.RenderCompact))
447 1269 17525 - 17598 Apply org.locationtech.geomesa.fs.storage.api.NamedOptions.apply org.locationtech.geomesa.fs.storage.api.`package`.NamedOptions.apply(FileBasedMetadata.this.MetadataType, scala.Predef.Map.apply[String, String](scala.Predef.ArrowAssoc[String](FileBasedMetadata.this.Config.RenderKey).->[String](FileBasedMetadata.this.Config.RenderCompact)))
448 1270 17649 - 17661 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.MetadataType FileBasedMetadata.this.MetadataType
448 1271 17667 - 17683 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.Config.RenderKey FileBasedMetadata.this.Config.RenderKey
448 1272 17687 - 17706 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.Config.RenderPretty FileBasedMetadata.this.Config.RenderPretty
448 1273 17667 - 17706 Apply scala.Predef.ArrowAssoc.-> scala.Predef.ArrowAssoc[String](FileBasedMetadata.this.Config.RenderKey).->[String](FileBasedMetadata.this.Config.RenderPretty)
448 1274 17663 - 17707 Apply scala.collection.generic.GenMapFactory.apply scala.Predef.Map.apply[String, String](scala.Predef.ArrowAssoc[String](FileBasedMetadata.this.Config.RenderKey).->[String](FileBasedMetadata.this.Config.RenderPretty))
448 1275 17636 - 17708 Apply org.locationtech.geomesa.fs.storage.api.NamedOptions.apply org.locationtech.geomesa.fs.storage.api.`package`.NamedOptions.apply(FileBasedMetadata.this.MetadataType, scala.Predef.Map.apply[String, String](scala.Predef.ArrowAssoc[String](FileBasedMetadata.this.Config.RenderKey).->[String](FileBasedMetadata.this.Config.RenderPretty)))
451 1276 17748 - 17756 Literal <nosymbol> "render"
453 1277 17782 - 17790 Literal <nosymbol> "pretty"
454 1278 17815 - 17824 Literal <nosymbol> "compact"
457 1279 17868 - 17879 Literal <nosymbol> "compacted"
458 1280 17918 - 17927 Literal <nosymbol> "update-"
459 1281 17966 - 17988 Apply java.lang.String.+ FileBasedMetadata.this.UpdateFilePrefix.+("$")
460 1282 18027 - 18046 Select org.locationtech.geomesa.fs.storage.common.metadata.RenderPretty.suffix metadata.this.`package`.RenderPretty.suffix
461 1283 18085 - 18105 Select org.locationtech.geomesa.fs.storage.common.metadata.RenderCompact.suffix metadata.this.`package`.RenderCompact.suffix
462 1284 18162 - 18176 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.JsonPathSuffix FileBasedMetadata.this.JsonPathSuffix
462 1285 18144 - 18176 Apply java.lang.String.+ FileBasedMetadata.this.CompactedPrefix.+(FileBasedMetadata.this.JsonPathSuffix)
463 1286 18233 - 18248 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.HoconPathSuffix FileBasedMetadata.this.HoconPathSuffix
463 1287 18215 - 18248 Apply java.lang.String.+ FileBasedMetadata.this.CompactedPrefix.+(FileBasedMetadata.this.HoconPathSuffix)
466 1289 18345 - 18348 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
468 1288 18527 - 18544 Apply org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.+ existing.+(update)
472 1295 18651 - 18654 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
474 1290 18848 - 18865 Apply org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata.- existing.-(update)
475 1291 18876 - 18896 Select scala.collection.SeqLike.isEmpty result.files.isEmpty
475 1292 18900 - 18904 Literal <nosymbol> null
475 1293 18900 - 18904 Block <nosymbol> null
475 1294 18914 - 18920 Ident org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.result result
480 1306 19016 - 19019 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.$anon.<init> new $anon()
482 1296 19214 - 19227 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.config update.config
482 1297 19211 - 19211 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]
482 1298 19195 - 19227 ApplyToImplicitArgs scala.collection.TraversableLike.++ existing.config.++[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig, Seq[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig]](update.config)(collection.this.Seq.canBuildFrom[org.locationtech.geomesa.fs.storage.common.metadata.PartitionConfig])
483 1299 19276 - 19289 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.parsed update.parsed
483 1300 19273 - 19273 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]
483 1301 19257 - 19289 ApplyToImplicitArgs scala.collection.TraversableLike.++ existing.parsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](update.parsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path])
483 1302 19312 - 19327 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.unparsed update.unparsed
483 1303 19309 - 19309 TypeApply scala.collection.Seq.canBuildFrom collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]
483 1304 19291 - 19327 ApplyToImplicitArgs scala.collection.TraversableLike.++ existing.unparsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](update.unparsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path])
483 1305 19234 - 19328 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.PartitionFiles.apply FileBasedMetadata.this.PartitionFiles.apply(config, existing.parsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](update.parsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]), existing.unparsed.++[org.apache.hadoop.fs.Path, Seq[org.apache.hadoop.fs.Path]](update.unparsed)(collection.this.Seq.canBuildFrom[org.apache.hadoop.fs.Path]))
494 1307 19530 - 19534 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.fc m.fc
494 1308 19536 - 19547 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.directory m.directory
494 1309 19549 - 19554 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.sft m.sft
494 1310 19556 - 19562 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.meta m.meta
494 1311 19564 - 19575 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.converter m.converter
494 1312 19508 - 19576 Apply org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.<init> new FileBasedMetadata(m.fc, m.directory, m.sft, m.meta, m.converter)
497 1313 19656 - 19671 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.HoconPathSuffix FileBasedMetadata.this.HoconPathSuffix
497 1314 19642 - 19672 Apply java.lang.String.endsWith file.endsWith(FileBasedMetadata.this.HoconPathSuffix)
498 1315 19682 - 19699 Literal <nosymbol> CONF
498 1316 19682 - 19699 Block <nosymbol> CONF
499 1317 19729 - 19743 Select org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadata.JsonPathSuffix FileBasedMetadata.this.JsonPathSuffix
499 1318 19715 - 19744 Apply java.lang.String.endsWith file.endsWith(FileBasedMetadata.this.JsonPathSuffix)
499 1323 19711 - 19814 If <nosymbol> if (file.endsWith(FileBasedMetadata.this.JsonPathSuffix)) JSON else JSON
500 1319 19754 - 19771 Literal <nosymbol> JSON
500 1320 19754 - 19771 Block <nosymbol> JSON
502 1321 19791 - 19808 Literal <nosymbol> JSON
502 1322 19791 - 19808 Block <nosymbol> JSON