-
Notifications
You must be signed in to change notification settings - Fork 20
(dsl): Support IP range aggregation #650
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
95e43c8
34aa413
17e75b1
217870e
6f7fc47
a008b52
bb29432
ccd21f7
ce1129a
9534345
509f319
390ef22
bfb9ea0
ed07116
a1e18ff
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1 +1,2 @@ | ||
* text=auto eol=lf | ||
sbt linguist-vendored |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
--- | ||
id: elastic_aggregation_ip_range | ||
title: "Ip Range Aggregation" | ||
--- | ||
|
||
The `Ip Range` aggregation is a multi-bucket aggregation that creates buckets for ranges of IP addresses, either using `from`/`to` values or `CIDR` masks. | ||
|
||
In order to use the `Ip Range` aggregation, import the following: | ||
```scala | ||
import zio.elasticsearch.aggregation.IpRangeAggregation | ||
import zio.elasticsearch.ElasticAggregation.ipRangeAggregation | ||
``` | ||
|
||
You can create a [type-safe](https://lambdaworks.github.io/zio-elasticsearch/overview/overview_zio_prelude_schema) `IpRange` aggregation using the `ipRangeAggregation` method this way: | ||
```scala | ||
val aggregation: IpRangeAggregation = | ||
ipRangeAggregation( | ||
name = "ip_range_agg", | ||
field = Document.stringField, | ||
ranges = NonEmptyChunk( | ||
IpRange.IpRangeBound(to = Some("10.0.0.5")), | ||
IpRange.IpRangeBound(from = Some("10.0.0.5")) | ||
) | ||
) | ||
``` | ||
|
||
You can create an `IpRange` aggregation using the `ipRangeAggregation` method this way: | ||
```scala | ||
val aggregation: IpRangeAggregation = | ||
ipRangeAggregation( | ||
name = "ip_range_agg", | ||
field = "ipField", | ||
ranges = NonEmptyChunk( | ||
IpRange.IpRangeBound(to = Some("10.0.0.5")), | ||
IpRange.IpRangeBound(from = Some("10.0.0.5")) | ||
) | ||
) | ||
``` | ||
|
||
You can also use CIDR masks for ranges: | ||
```scala | ||
val cidrAggregation: IpRangeAggregation = | ||
ipRangeAggregation( | ||
name = "cidr_agg", | ||
field = "ipField", | ||
ranges = NonEmptyChunk( | ||
IpRange.IpRangeBound(mask = Some("10.0.0.0/25")), | ||
IpRange.IpRangeBound(mask = Some("10.0.0.128/25")) | ||
) | ||
) | ||
``` | ||
|
||
If you want to explicitly set the `keyed` property to `true`, use the `keyedOn` method: | ||
```scala | ||
val multipleAggregations = | ||
ipRangeAggregation("ip_range_agg", "ipField", NonEmptyChunk(IpRange.IpRangeBound(to = Some("10.0.0.5")))) | ||
.keyedOn | ||
.withAgg(maxAggregation("maxAgg", "someField")) | ||
``` | ||
|
||
You can find more information about `Ip Range` aggregation [here](https://www.elastic.co/docs/reference/aggregations/search-aggregations-bucket-iprange-aggregation). |
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This file still has formatting issues, and it also contains special ^M (carriage return) characters |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,15 +16,16 @@ | |
|
||
package zio.elasticsearch.aggregation | ||
|
||
import zio.Chunk | ||
import zio.elasticsearch.ElasticAggregation.multipleAggregations | ||
import zio.elasticsearch.ElasticPrimitive.ElasticPrimitiveOps | ||
import zio.elasticsearch.aggregation.IpRange.IpRangeBound | ||
import zio.elasticsearch.aggregation.options._ | ||
import zio.elasticsearch.query.ElasticQuery | ||
import zio.elasticsearch.query.sort.Sort | ||
import zio.elasticsearch.script.Script | ||
import zio.json.ast.Json | ||
import zio.json.ast.Json.{Arr, Obj} | ||
import zio.{Chunk, NonEmptyChunk} | ||
|
||
sealed trait ElasticAggregation { self => | ||
private[elasticsearch] def toJson: Json | ||
|
@@ -205,6 +206,71 @@ private[elasticsearch] final case class Filter( | |
} | ||
} | ||
|
||
sealed trait IpRangeAggregation extends SingleElasticAggregation with WithAgg with WithSubAgg[IpRangeAggregation] | ||
|
||
private[elasticsearch] final case class IpRange( | ||
name: String, | ||
field: String, | ||
ranges: NonEmptyChunk[IpRangeBound], | ||
keyed: Option[Boolean], | ||
subAggregations: Option[Chunk[SingleElasticAggregation]] | ||
markok4 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
) extends IpRangeAggregation { self => | ||
|
||
def keyedOn: IpRangeAggregation = self.copy(keyed = Some(true)) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I guess we can use only |
||
|
||
def withAgg(aggregation: SingleElasticAggregation): MultipleAggregations = | ||
multipleAggregations.aggregations(self, aggregation) | ||
|
||
def withSubAgg(aggregation: SingleElasticAggregation): IpRangeAggregation = | ||
self.copy(subAggregations = Some(aggregation +: subAggregations.getOrElse(Chunk.empty))) | ||
|
||
private[elasticsearch] def toJson: Json = { | ||
|
||
val rangesJson = ranges.map(_.toJson) | ||
val keyedJson = keyed.fold(Obj())(k => Obj("keyed" -> k.toJson)) | ||
val subAggsJson = subAggregations match { | ||
case Some(aggs) if aggs.nonEmpty => | ||
Obj("aggs" -> aggs.map(_.toJson).reduce(_ merge _)) | ||
case _ => Obj() | ||
} | ||
|
||
Obj( | ||
name -> ( | ||
Obj("ip_range" -> (Obj("field" -> field.toJson, "ranges" -> Arr(rangesJson)) merge keyedJson)) merge subAggsJson | ||
) | ||
) | ||
} | ||
} | ||
|
||
object IpRange { | ||
|
||
final case class IpRangeBound( | ||
from: Option[String] = None, | ||
to: Option[String] = None, | ||
mask: Option[String] = None, | ||
key: Option[String] = None | ||
markok4 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
) { self => | ||
|
||
def from(value: String): IpRangeBound = self.copy(from = Some(value)) | ||
|
||
def to(value: String): IpRangeBound = self.copy(to = Some(value)) | ||
|
||
def mask(value: String): IpRangeBound = self.copy(mask = Some(value)) | ||
|
||
def key(value: String): IpRangeBound = self.copy(key = Some(value)) | ||
markok4 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
def toJson: Json = { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can we put |
||
val baseFields = Chunk.empty[(String, Json)] ++ | ||
from.map("from" -> _.toJson) ++ | ||
to.map("to" -> _.toJson) ++ | ||
mask.map("mask" -> _.toJson) ++ | ||
key.map("key" -> _.toJson) | ||
|
||
Obj(baseFields: _*) | ||
} | ||
} | ||
} | ||
|
||
sealed trait MaxAggregation extends SingleElasticAggregation with HasMissing[MaxAggregation] with WithAgg | ||
|
||
private[elasticsearch] final case class Max(name: String, field: String, missing: Option[Double]) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -77,6 +77,18 @@ object AggregationResponse { | |
(key, toResult(response)) | ||
}) | ||
) | ||
case IpRangeAggregationResponse(buckets) => | ||
IpRangeAggregationResult( | ||
buckets.map(b => | ||
IpRangeAggregationBucketResult( | ||
key = b.key, | ||
from = b.from, | ||
to = b.to, | ||
docCount = b.docCount, | ||
subAggregations = Map.empty | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @dbulaja98, up there, we have |
||
) | ||
) | ||
) | ||
case MaxAggregationResponse(value) => | ||
MaxAggregationResult(value) | ||
case MinAggregationResponse(value) => | ||
|
@@ -157,6 +169,8 @@ private[elasticsearch] case class BucketDecoder(fields: Chunk[(String, Json)]) e | |
) | ||
case str if str.contains("filter#") => | ||
Some(field -> data.unsafeAs[FilterAggregationResponse](FilterAggregationResponse.decoder)) | ||
case str if str.contains("ip_range#") => | ||
Some(field -> data.unsafeAs[IpRangeAggregationResponse](IpRangeAggregationResponse.decoder)) | ||
case str if str.contains("max#") => | ||
Some(field -> MaxAggregationResponse(value = objFields("value").unsafeAs[Double])) | ||
case str if str.contains("min#") => | ||
|
@@ -202,6 +216,8 @@ private[elasticsearch] case class BucketDecoder(fields: Chunk[(String, Json)]) e | |
(field.split("#")(1), data.asInstanceOf[ExtendedStatsAggregationResponse]) | ||
case str if str.contains("filter#") => | ||
(field.split("#")(1), data.asInstanceOf[FilterAggregationResponse]) | ||
case str if str.contains("ip_range#") => | ||
(field.split("#")(1), data.asInstanceOf[IpRangeAggregationResponse]) | ||
case str if str.contains("max#") => | ||
(field.split("#")(1), data.asInstanceOf[MaxAggregationResponse]) | ||
case str if str.contains("min#") => | ||
|
@@ -285,6 +301,27 @@ private[elasticsearch] sealed trait JsonDecoderOps { | |
} | ||
} | ||
|
||
private[elasticsearch] final case class IpRangeAggregationBucket( | ||
key: String, | ||
from: Option[String], | ||
to: Option[String], | ||
@jsonField("doc_count") | ||
docCount: Int | ||
) extends AggregationBucket | ||
|
||
private[elasticsearch] object IpRangeAggregationBucket { | ||
implicit val decoder: JsonDecoder[IpRangeAggregationBucket] = DeriveJsonDecoder.gen[IpRangeAggregationBucket] | ||
} | ||
|
||
private[elasticsearch] final case class IpRangeAggregationResponse( | ||
buckets: Chunk[IpRangeAggregationBucket] | ||
) extends AggregationResponse | ||
|
||
private[elasticsearch] object IpRangeAggregationResponse { | ||
implicit val decoder: JsonDecoder[IpRangeAggregationResponse] = | ||
DeriveJsonDecoder.gen[IpRangeAggregationResponse] | ||
} | ||
|
||
private[elasticsearch] final case class MaxAggregationResponse(value: Double) extends AggregationResponse | ||
|
||
private[elasticsearch] object MaxAggregationResponse { | ||
|
Uh oh!
There was an error while loading. Please reload this page.