// +build requires_docker

package main

import (
	"context"
	"flag"
	"strings"
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"

	"github.com/cortexproject/cortex/integration/e2e"
	e2edb "github.com/cortexproject/cortex/integration/e2e/db"
	"github.com/cortexproject/cortex/integration/e2ecortex"
	"github.com/cortexproject/cortex/pkg/ingester/client"
)

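// TestQuerierWithStreamingBlocksAndChunksIngesters pushes the same metric to a
// chunks-based ingester and a blocks-based ingester, then checks that a single
// querier (with ingester streaming enabled) merges and deduplicates the series
// coming back from both.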
func TestQuerierWithStreamingBlocksAndChunksIngesters(t *testing.T) {
	s, err := e2e.NewScenario(networkName)
	require.NoError(t, err)
	defer s.Close()

	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
	chunksFlags := mergeFlags(ChunksStorageFlags, map[string]string{})

	blockFlags := mergeFlags(BlocksStorageFlags, map[string]string{
		"-experimental.blocks-storage.tsdb.block-ranges-period":      "1h",
		"-experimental.blocks-storage.tsdb.head-compaction-interval": "1m",
		"-experimental.store-gateway.sharding-enabled":               "false",
		"-querier.ingester-streaming":                                "true",
	})

	// Start dependencies.
	consul := e2edb.NewConsul()
	minio := e2edb.NewMinio(9000, blockFlags["-experimental.blocks-storage.s3.bucket-name"])
	require.NoError(t, s.StartAndWaitReady(consul, minio))

	// Start Cortex components.
	ingesterBlocks := e2ecortex.NewIngester("ingester-blocks", consul.NetworkHTTPEndpoint(), blockFlags, "")
	ingesterChunks := e2ecortex.NewIngester("ingester-chunks", consul.NetworkHTTPEndpoint(), chunksFlags, "")
	storeGateway := e2ecortex.NewStoreGateway("store-gateway", consul.NetworkHTTPEndpoint(), blockFlags, "")
	require.NoError(t, s.StartAndWaitReady(ingesterBlocks, ingesterChunks, storeGateway))

	// Store-gateway sharding is disabled, so the querier must be configured
	// with the store-gateway address explicitly.
	querierFlags := mergeFlags(blockFlags, map[string]string{
		"-experimental.querier.store-gateway-addresses": strings.Join([]string{storeGateway.NetworkGRPCEndpoint()}, ","),
		"-distributor.shard-by-all-labels":              "true",
	})
	querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), querierFlags, "")
	require.NoError(t, s.StartAndWaitReady(querier))

	// Wait until the querier has discovered both ingesters in the ring
	// (512 tokens each).
	require.NoError(t, querier.WaitSumMetrics(e2e.Equals(1024), "cortex_ring_tokens_total"))

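	// Fixture samples. s1 and s2 share the sample at t=3000, so the series
	// pushed to both ingesters exercises deduplication when merged.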
	s1 := []client.Sample{
		{Value: 1, TimestampMs: 1000},
		{Value: 2, TimestampMs: 2000},
		{Value: 3, TimestampMs: 3000},
		{Value: 4, TimestampMs: 4000},
		{Value: 5, TimestampMs: 5000},
	}

	s2 := []client.Sample{
		{Value: 1, TimestampMs: 1000},
		{Value: 2.5, TimestampMs: 2500},
		{Value: 3, TimestampMs: 3000},
		{Value: 5.5, TimestampMs: 5500},
	}

	clientConfig := client.Config{}
	clientConfig.RegisterFlags(flag.NewFlagSet("unused", flag.ContinueOnError)) // Registers default values.

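	// The pushes below use the low-level ingester gRPC client, bypassing the
	// distributor, so each series lands exactly on the intended ingester.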
	// Push data to the chunks ingester.
	{
		ingesterChunksClient, err := client.MakeIngesterClient(ingesterChunks.GRPCEndpoint(), clientConfig)
		require.NoError(t, err)
		defer ingesterChunksClient.Close()

		_, err = ingesterChunksClient.Push(user.InjectOrgID(context.Background(), "user"), &client.WriteRequest{
			Timeseries: []client.PreallocTimeseries{
				{TimeSeries: &client.TimeSeries{Labels: []client.LabelAdapter{{Name: labels.MetricName, Value: "s"}, {Name: "l", Value: "1"}}, Samples: s1}},
				{TimeSeries: &client.TimeSeries{Labels: []client.LabelAdapter{{Name: labels.MetricName, Value: "s"}, {Name: "l", Value: "2"}}, Samples: s1}},
			},
			Source: client.API,
		})
		require.NoError(t, err)
	}

	// Push data to the blocks ingester.
	{
		ingesterBlocksClient, err := client.MakeIngesterClient(ingesterBlocks.GRPCEndpoint(), clientConfig)
		require.NoError(t, err)
		defer ingesterBlocksClient.Close()

		_, err = ingesterBlocksClient.Push(user.InjectOrgID(context.Background(), "user"), &client.WriteRequest{
			Timeseries: []client.PreallocTimeseries{
				{TimeSeries: &client.TimeSeries{Labels: []client.LabelAdapter{{Name: labels.MetricName, Value: "s"}, {Name: "l", Value: "2"}}, Samples: s2}},
				{TimeSeries: &client.TimeSeries{Labels: []client.LabelAdapter{{Name: labels.MetricName, Value: "s"}, {Name: "l", Value: "3"}}, Samples: s1}},
			},
			Source: client.API,
		})
		require.NoError(t, err)
	}

	c, err := e2ecortex.NewClient("", querier.HTTPEndpoint(), "", "", "user")
	require.NoError(t, err)

	// Query back the series: {l="1"} lives only in the chunks ingester,
	// {l="3"} only in the blocks ingester, and {l="2"} in both.
	result, err := c.Query("s[1m]", time.Unix(10, 0))
	require.NoError(t, err)

	s1Values := []model.SamplePair{
		{Value: 1, Timestamp: 1000},
		{Value: 2, Timestamp: 2000},
		{Value: 3, Timestamp: 3000},
		{Value: 4, Timestamp: 4000},
		{Value: 5, Timestamp: 5000},
	}

	s1AndS2ValuesMerged := []model.SamplePair{
		{Value: 1, Timestamp: 1000},
		{Value: 2, Timestamp: 2000},
		{Value: 2.5, Timestamp: 2500},
		{Value: 3, Timestamp: 3000},
		{Value: 4, Timestamp: 4000},
		{Value: 5, Timestamp: 5000},
		{Value: 5.5, Timestamp: 5500},
	}

	expectedMatrix := model.Matrix{
		// From the chunks ingester only.
		&model.SampleStream{
			Metric: model.Metric{labels.MetricName: "s", "l": "1"},
			Values: s1Values,
		},

		// From the blocks ingester only.
		&model.SampleStream{
			Metric: model.Metric{labels.MetricName: "s", "l": "3"},
			Values: s1Values,
		},

		// Merged from both ingesters.
		&model.SampleStream{
			Metric: model.Metric{labels.MetricName: "s", "l": "2"},
			Values: s1AndS2ValuesMerged,
		},
	}

	require.Equal(t, model.ValMatrix, result.Type())
	// The order of series in the result matrix is not guaranteed, so compare
	// ignoring order.
	require.ElementsMatch(t, expectedMatrix, result.(model.Matrix))
}
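// A sketch of how to run this test locally, assuming Docker is available, the
// Cortex images used by the e2e scenario have been built, and the working
// directory is the repository root:
//
//	go test -v -tags requires_docker -run TestQuerierWithStreamingBlocksAndChunksIngesters ./integration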