@@ -30,6 +30,7 @@ import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders, Row, SparkSe
 import org.apache.spark.sql.streaming.{DataStreamWriter, Trigger}
 import org.apache.spark.sql.types.{BooleanType, LongType, StructField, StructType}
 import org.slf4j.{Logger, LoggerFactory}
+import ai.chronon.online.metrics

 import java.time.{Instant, ZoneId, ZoneOffset}
 import java.time.format.DateTimeFormatter
@@ -64,11 +65,10 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map
                       apiImpl: Api)
     extends Serializable {

-  import ai.chronon.online.metrics
-
   @transient implicit lazy val logger: Logger = LoggerFactory.getLogger(getClass)

-  val context: metrics.Metrics.Context = metrics.Metrics.Context(metrics.Metrics.Environment.GroupByStreaming, groupByConf)
+  val context: metrics.Metrics.Context =
+    metrics.Metrics.Context(metrics.Metrics.Environment.GroupByStreaming, groupByConf)

   private case class Schemas(leftStreamSchema: StructType,
                              leftSourceSchema: StructType,
@@ -123,7 +123,6 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map
   private val streamingDataset: String = groupByConf.streamingDataset

   def toPutRequest(input: Row): KVStore.PutRequest = {
-    import ai.chronon.online.metrics
     val keys = keyIndices.map(input.get)
     val values = valueIndices.map(input.get)
@@ -230,7 +229,6 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map
     val deserialized: Dataset[Mutation] = df
       .as[Array[Byte]]
       .map { arr =>
-        import ai.chronon.online.metrics
         ingressContext.increment(metrics.Metrics.Name.RowCount)
         ingressContext.count(metrics.Metrics.Name.Bytes, arr.length)
         try {
@@ -289,7 +287,6 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map
   }

   def chainedStreamingQuery: DataStreamWriter[Row] = {
-    import ai.chronon.online.metrics
     val joinSource = groupByConf.streamingSource.get.getJoinSource
     val left = joinSource.join.left
     val topic = TopicInfo.parse(left.topic)
@@ -357,7 +354,6 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map

       val rowsScala = rows.toScala.toArray
       val requests = rowsScala.map { row =>
-        import ai.chronon.online.metrics
         val keyMap = row.getValuesMap[AnyRef](leftColumns)
         val eventTs = row.get(leftTimeIndex).asInstanceOf[Long]
         context.distribution(metrics.Metrics.Name.LagMillis, System.currentTimeMillis() - eventTs)
@@ -368,12 +364,10 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map
       val microBatchTimestamp =
         percentile(rowsScala.map(_.get(leftTimeIndex).asInstanceOf[Long]), timePercentile)
       if (microBatchTimestamp.isDefined) {
-        import ai.chronon.online.metrics
         val microBatchLag = System.currentTimeMillis() - microBatchTimestamp.get
         context.distribution(metrics.Metrics.Name.BatchLagMillis, microBatchLag)

         if (minimumQueryDelayMs > 0 && microBatchLag >= 0 && microBatchLag < minimumQueryDelayMs) {
-          import ai.chronon.online.metrics
           val sleepMillis = minimumQueryDelayMs - microBatchLag
           Thread.sleep(sleepMillis)
           context.distribution(metrics.Metrics.Name.QueryDelaySleepMillis, sleepMillis)
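Note: the two deletions in this hunk only drop scope-local imports that the new file-level import makes redundant; the micro-batch delay behaviour itself is unchanged. As a standalone sketch of that retained behaviour (the object and method names below are illustrative stand-ins, not the runner's API):

// Sketch: hold a micro-batch until its representative event timestamp is at
// least `minimumQueryDelayMs` old. Only the sleep arithmetic mirrors the hunk.
object ThrottleSketch {
  def throttle(microBatchTimestamp: Option[Long], minimumQueryDelayMs: Long): Unit =
    microBatchTimestamp.foreach { ts =>
      val microBatchLag = System.currentTimeMillis() - ts
      // Sleep only when the batch is "too fresh": lag is non-negative but under the floor.
      if (minimumQueryDelayMs > 0 && microBatchLag >= 0 && microBatchLag < minimumQueryDelayMs)
        Thread.sleep(minimumQueryDelayMs - microBatchLag)
    }
}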
@@ -416,7 +410,6 @@ class JoinSourceRunner(groupByConf: api.GroupBy, conf: Map[String, String] = Map

   def emitRequestMetric(request: PutRequest, context: metrics.Metrics.Context): Unit = {
     request.tsMillis.foreach { ts: Long =>
-      import ai.chronon.online.metrics
       context.distribution(metrics.Metrics.Name.FreshnessMillis, System.currentTimeMillis() - ts)
       context.increment(metrics.Metrics.Name.RowCount)
       context.distribution(metrics.Metrics.Name.ValueBytes, request.valueBytes.length)
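Taken together, the change hoists `import ai.chronon.online.metrics` out of seven method- and class-local scopes into a single file-level import; every `metrics.Metrics.*` reference resolves exactly as before. A minimal self-contained sketch of the pattern (the `online`/`metrics` objects and the environment string below are invented stand-ins for the real package, used only to show import placement):

// Stand-in for the ai.chronon.online.metrics package.
object online {
  object metrics {
    object Metrics {
      case class Context(environment: String)
      object Environment { val GroupByStreaming = "group_by.streaming" } // illustrative value
    }
  }
}

import online.metrics // hoisted once at file level, as the diff now does

object RunnerSketch {
  // Resolves through the single top-level import; no per-method import needed.
  val context: metrics.Metrics.Context =
    metrics.Metrics.Context(metrics.Metrics.Environment.GroupByStreaming)
}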