-
Notifications
You must be signed in to change notification settings - Fork 3.6k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
chore(dataobj): Create initial dataobj builder (#16011)
Co-authored-by: Cyril Tovena <[email protected]> Co-authored-by: Robert Fratto <[email protected]>
- Loading branch information
1 parent
4b44b59
commit ca4c025
Showing
31 changed files
with
1,352 additions
and
46 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,32 @@ | ||
package consumer | ||
|
||
import ( | ||
"errors" | ||
"flag" | ||
|
||
"github.com/grafana/loki/v3/pkg/dataobj" | ||
) | ||
|
||
// Config configures the dataobj consumer. It embeds dataobj.BuilderConfig,
// so all builder settings are exposed alongside the consumer-specific
// fields below.
type Config struct {
	dataobj.BuilderConfig
	// TenantID is the tenant under which data objects are built.
	// NOTE(review): defaults to "fake" via RegisterFlagsWithPrefix — confirm
	// that is intentional for production use.
	TenantID string `yaml:"tenant_id"`
	// StorageBucketPrefix is the prefix to use for the storage bucket.
	StorageBucketPrefix string `yaml:"storage_bucket_prefix"`
}
|
||
func (cfg *Config) Validate() error { | ||
if cfg.TenantID == "" { | ||
return errors.New("tenantID is required") | ||
} | ||
return cfg.BuilderConfig.Validate() | ||
} | ||
|
||
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { | ||
cfg.RegisterFlagsWithPrefix("dataobj-consumer.", f) | ||
} | ||
|
||
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { | ||
cfg.BuilderConfig.RegisterFlagsWithPrefix(prefix, f) | ||
f.StringVar(&cfg.TenantID, prefix+"tenant-id", "fake", "The tenant ID to use for the data object builder.") | ||
f.StringVar(&cfg.StorageBucketPrefix, prefix+"storage-bucket-prefix", "dataobj/", "The prefix to use for the storage bucket.") | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,117 @@ | ||
package consumer | ||
|
||
import (
	"errors"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/atomic"
)
|
||
// partitionOffsetMetrics holds the Prometheus metrics tracked for a single
// partition consumer.
type partitionOffsetMetrics struct {
	// currentOffset exposes lastOffset through a GaugeFunc, so the value
	// is read lazily at scrape time rather than pushed on every update.
	currentOffset prometheus.GaugeFunc
	// lastOffset is the most recently consumed offset, stored atomically
	// so updateOffset and metric scrapes can run concurrently.
	lastOffset atomic.Int64

	// Error counters
	flushFailures  prometheus.Counter
	commitFailures prometheus.Counter
	appendFailures prometheus.Counter

	// Processing delay histogram
	processingDelay prometheus.Histogram
}
|
||
func newPartitionOffsetMetrics() *partitionOffsetMetrics { | ||
p := &partitionOffsetMetrics{ | ||
flushFailures: prometheus.NewCounter(prometheus.CounterOpts{ | ||
Name: "loki_dataobj_consumer_flush_failures_total", | ||
Help: "Total number of flush failures", | ||
}), | ||
commitFailures: prometheus.NewCounter(prometheus.CounterOpts{ | ||
Name: "loki_dataobj_consumer_commit_failures_total", | ||
Help: "Total number of commit failures", | ||
}), | ||
appendFailures: prometheus.NewCounter(prometheus.CounterOpts{ | ||
Name: "loki_dataobj_consumer_append_failures_total", | ||
Help: "Total number of append failures", | ||
}), | ||
processingDelay: prometheus.NewHistogram(prometheus.HistogramOpts{ | ||
Name: "loki_dataobj_consumer_processing_delay_seconds", | ||
Help: "Time difference between record timestamp and processing time in seconds", | ||
Buckets: prometheus.DefBuckets, | ||
NativeHistogramBucketFactor: 1.1, | ||
NativeHistogramMaxBucketNumber: 100, | ||
NativeHistogramMinResetDuration: 0, | ||
}), | ||
} | ||
|
||
p.currentOffset = prometheus.NewGaugeFunc( | ||
prometheus.GaugeOpts{ | ||
Name: "loki_dataobj_consumer_current_offset", | ||
Help: "The last consumed offset for this partition", | ||
}, | ||
p.getCurrentOffset, | ||
) | ||
|
||
return p | ||
} | ||
|
||
// getCurrentOffset adapts the atomically stored last offset to the
// float64 form required by the currentOffset GaugeFunc.
func (p *partitionOffsetMetrics) getCurrentOffset() float64 {
	return float64(p.lastOffset.Load())
}
|
||
func (p *partitionOffsetMetrics) register(reg prometheus.Registerer) error { | ||
collectors := []prometheus.Collector{ | ||
p.currentOffset, | ||
p.flushFailures, | ||
p.commitFailures, | ||
p.appendFailures, | ||
p.processingDelay, | ||
} | ||
|
||
for _, collector := range collectors { | ||
if err := reg.Register(collector); err != nil { | ||
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { | ||
return err | ||
} | ||
} | ||
} | ||
return nil | ||
} | ||
|
||
func (p *partitionOffsetMetrics) unregister(reg prometheus.Registerer) { | ||
collectors := []prometheus.Collector{ | ||
p.currentOffset, | ||
p.flushFailures, | ||
p.commitFailures, | ||
p.appendFailures, | ||
p.processingDelay, | ||
} | ||
|
||
for _, collector := range collectors { | ||
reg.Unregister(collector) | ||
} | ||
} | ||
|
||
// updateOffset records the most recently consumed offset. The value is
// exported through the currentOffset gauge at scrape time.
func (p *partitionOffsetMetrics) updateOffset(offset int64) {
	p.lastOffset.Store(offset)
}
|
||
// incFlushFailures increments the flush-failure counter.
func (p *partitionOffsetMetrics) incFlushFailures() {
	p.flushFailures.Inc()
}
|
||
// incCommitFailures increments the commit-failure counter.
func (p *partitionOffsetMetrics) incCommitFailures() {
	p.commitFailures.Inc()
}
|
||
// incAppendFailures increments the append-failure counter.
func (p *partitionOffsetMetrics) incAppendFailures() {
	p.appendFailures.Inc()
}
|
||
func (p *partitionOffsetMetrics) observeProcessingDelay(recordTimestamp time.Time) { | ||
// Convert milliseconds to seconds and calculate delay | ||
if !recordTimestamp.IsZero() { // Only observe if timestamp is valid | ||
p.processingDelay.Observe(time.Since(recordTimestamp).Seconds()) | ||
} | ||
} |
Oops, something went wrong.