minio/cmd/http-traffic-recorder.go
Praveen raj Mani 8836d57e3c The prometheus metrics refactoring (#8003)
The measures are consolidated to the following metrics

- `disk_storage_used` : Disk space used by the disk.
- `disk_storage_available`: Available disk space left on the disk.
- `disk_storage_total`: Total disk space on the disk.
- `disks_offline`: Total number of offline disks in current MinIO instance.
- `disks_total`: Total number of disks in current MinIO instance.
- `s3_requests_total`: Total number of s3 requests in current MinIO instance.
- `s3_errors_total`: Total number of errors in s3 requests in current MinIO instance.
- `s3_requests_current`: Total number of active s3 requests in current MinIO instance.
- `internode_rx_bytes_total`: Total number of internode bytes received by current MinIO server instance.
- `internode_tx_bytes_total`: Total number of bytes sent to the other nodes by current MinIO server instance.
- `s3_rx_bytes_total`: Total number of s3 bytes received by current MinIO server instance.
- `s3_tx_bytes_total`: Total number of s3 bytes sent by current MinIO server instance.
- `minio_version_info`: Current MinIO version with commit-id.
- `s3_ttfb_seconds_bucket`: Histogram that holds the latency information of the requests.

And this PR also modifies the current StorageInfo queries

- Decouples StorageInfo from ServerInfo .
- StorageInfo is enhanced to give endpoint information.

NOTE: ADMIN API VERSION IS BUMPED UP IN THIS PR

Fixes #7873
2019-10-22 21:01:14 -07:00

107 lines
2.6 KiB
Go

/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
"net/http"
"time"
)
// recordTrafficRequest wraps an incoming request.Body so that every Read
// is counted toward the global input-byte statistics (and, for S3 requests,
// the S3-specific input counter as well).
type recordTrafficRequest struct {
	io.ReadCloser
	// isS3Request marks whether the wrapped body belongs to an S3 API
	// request, in which case bytes are also added to the S3 counters.
	isS3Request bool
}
// Read delegates to the wrapped ReadCloser and accounts the number of
// bytes obtained in the global connection statistics. Bytes from S3
// requests are additionally tracked in the S3-specific counter.
func (r *recordTrafficRequest) Read(p []byte) (n int, err error) {
	n, err = r.ReadCloser.Read(p)
	// Account whatever was read, even if the reader also returned an error.
	globalConnStats.incInputBytes(n)
	if r.isS3Request {
		globalConnStats.incS3InputBytes(n)
	}
	return
}
// recordTrafficResponse wraps an http.ResponseWriter so that every Write
// is counted toward the global output-byte statistics (and, for S3
// requests, the S3-specific output counter as well).
type recordTrafficResponse struct {
	// wrapper for underlying http.ResponseWriter.
	writer http.ResponseWriter
	// isS3Request marks whether this response belongs to an S3 API
	// request, in which case bytes are also added to the S3 counters.
	isS3Request bool
}
// WriteHeader relays the status code to the wrapped ResponseWriter.
func (r *recordTrafficResponse) WriteHeader(statusCode int) {
	r.writer.WriteHeader(statusCode)
}
// Header returns the header map of the wrapped ResponseWriter.
func (r *recordTrafficResponse) Header() http.Header {
	return r.writer.Header()
}
// Write forwards the payload to the wrapped ResponseWriter and adds the
// number of bytes written to the global output counters. S3 responses are
// additionally tracked in the S3-specific counter.
func (r *recordTrafficResponse) Write(p []byte) (n int, err error) {
	n, err = r.writer.Write(p)
	// Account whatever was written, even if the writer also returned an error.
	globalConnStats.incOutputBytes(n)
	if r.isS3Request {
		globalConnStats.incS3OutputBytes(n)
	}
	return
}
// Flush calls Flush on the wrapped ResponseWriter, if it supports it.
// The comma-ok assertion avoids a runtime panic when the underlying
// writer does not implement http.Flusher (the original unchecked
// assertion would panic in that case).
func (r *recordTrafficResponse) Flush() {
	if f, ok := r.writer.(http.Flusher); ok {
		f.Flush()
	}
}
// recordAPIStats wraps an http.ResponseWriter to capture per-request API
// statistics: the time of the first byte written (TTFB) and the response
// status code. (The original comment here was a copy-paste of the
// byte-recorder above; this type does not count bytes.)
type recordAPIStats struct {
	// wrapper for underlying http.ResponseWriter.
	writer http.ResponseWriter
	TTFB time.Time // TimeToFirstByte.
	// firstByteRead flips to true on the first Write, so TTFB is only
	// captured once.
	firstByteRead bool
	// respStatusCode holds the status code passed to WriteHeader.
	respStatusCode int
	isS3Request bool
}
// WriteHeader stores the response status code for later reporting and
// then delegates to the wrapped ResponseWriter.
func (r *recordAPIStats) WriteHeader(statusCode int) {
	r.respStatusCode = statusCode
	r.writer.WriteHeader(statusCode)
}
// Header returns the header map of the wrapped ResponseWriter.
func (r *recordAPIStats) Header() http.Header {
	return r.writer.Header()
}
// Write captures the time-to-first-byte on the very first write and then
// forwards the payload to the wrapped ResponseWriter.
func (r *recordAPIStats) Write(p []byte) (n int, err error) {
	if !r.firstByteRead {
		r.firstByteRead = true
		r.TTFB = UTCNow()
	}
	return r.writer.Write(p)
}
// Flush calls Flush on the wrapped ResponseWriter, if it supports it.
// The comma-ok assertion avoids a runtime panic when the underlying
// writer does not implement http.Flusher (the original unchecked
// assertion would panic in that case).
func (r *recordAPIStats) Flush() {
	if f, ok := r.writer.(http.Flusher); ok {
		f.Flush()
	}
}