KNETRequestProfiler mierzy szczegółowe metryki czasowe requestów
i pomaga identyfikować wąskie gardła w komunikacji sieciowej.
// Example: attach the profiler to a client and read the last request's profile.
import rip.nerd.kitsunenet.util.KNETRequestProfiler
val profiler = KNETRequestProfiler.create()
// As an interceptor
val client = KNETClient.builder()
.addInterceptor(profiler.interceptor())
.build()
// Execute some requests
val response = client.get("https://api.example.com/users")
// Fetch the profile of the last request
// NOTE(review): getLastProfile() appears to be nullable (a later example uses `!!`) — confirm before dereferencing.
val profile = profiler.getLastProfile()
println("URL: ${profile.url}")
println("Status: ${profile.statusCode}")
println("Total: ${profile.totalMs}ms")
println("Slow: ${profile.isSlow}")
/**
 * Immutable snapshot of a single profiled HTTP request.
 *
 * All `*Ms` fields are phase durations in milliseconds; `requestSize` and
 * `responseSize` are byte counts.
 */
data class RequestProfile(
val id: String,
val url: String,
val method: String,
val statusCode: Int?, // presumably null when no response was received — TODO confirm
val totalMs: Long, // Total wall-clock time
val dnsMs: Long, // DNS resolution
val connectMs: Long, // TCP connect
val sslMs: Long, // SSL handshake
val sendMs: Long, // Sending the request data
val waitMs: Long, // TTFB (Time to First Byte)
val receiveMs: Long, // Receiving the response data
val requestSize: Long, // Request body size
val responseSize: Long, // Response body size
val isSlow: Boolean, // Whether the configured slow threshold was exceeded
val error: String? // Error message (if one occurred)
)
// Example: inspect a single profile to locate the bottleneck.
// NOTE(review): `!!` assumes at least one request has already been profiled.
val profile = profiler.getLastProfile()!!
// Check where the problem is
if (profile.waitMs > 1000) {
println("Backend wolno odpowiada (TTFB: ${profile.waitMs}ms)")
}
if (profile.receiveMs > 500) {
println("Duża ilość danych (${profile.responseSize} bytes)")
}
// Throughput
println("Throughput: ${profile.throughputBytesPerSec} B/s")
// After many requests have been executed
val stats = profiler.getEndpointStats("api.example.com/users")
println("Requests: ${stats.requestCount}")
println("Avg latency: ${stats.averageLatencyMs}ms")
println("Min: ${stats.minLatencyMs}ms")
println("Max: ${stats.maxLatencyMs}ms")
println("Errors: ${stats.errorCount}")
println("Slow: ${stats.slowCount}")
// Percentiles
println("P50: ${stats.p50LatencyMs}ms")
println("P90: ${stats.p90LatencyMs}ms")
println("P95: ${stats.p95LatencyMs}ms")
println("P99: ${stats.p99LatencyMs}ms")
// Global (cross-endpoint) statistics
val globalStats = profiler.getStats()
println("Total requests: ${globalStats.totalRequests}")
println("Slow requests: ${globalStats.slowRequests}")
println("Slow rate: ${globalStats.slowRate}%")
println("Avg latency: ${globalStats.averageLatencyMs}ms")
println("P95 latency: ${globalStats.p95LatencyMs}ms")
println("Slowest endpoint: ${globalStats.slowestEndpoint}")
println("Fastest endpoint: ${globalStats.fastestEndpoint}")
println("Most errors: ${globalStats.mostErrorsEndpoint}")
// Example: full builder configuration.
val profiler = KNETRequestProfiler.builder()
// Threshold for "slow" requests
.slowThresholdMs(2000) // > 2s = slow
// Maximum number of retained profiles
.maxProfiles(500)
// Group statistics per endpoint
.groupByEndpoint(true)
// Callback for slow requests
.onSlowRequest { profile ->
Log.w("Performance", "Slow request: ${profile.url} (${profile.totalMs}ms)")
analytics.log("slow_request", profile.toMap())
}
.build()
// Example: convenience factory with a slow-request alert.
val profiler = KNETRequestProfiler.withSlowAlert(
thresholdMs = 3000,
onSlow = { profile ->
// Alert the monitoring system
monitoring.alert(
title = "Slow HTTP Request",
message = "${profile.method} ${profile.url} took ${profile.totalMs}ms",
severity = if (profile.totalMs > 10000) "critical" else "warning"
)
// Log to analytics
analytics.logSlowRequest(profile.toMap())
}
)
// Export to JSON
val json = profiler.exportToJson()
// Save to a file
File(context.cacheDir, "profiler_data.json").writeText(json)
// Send to the backend
client.post("https://analytics.example.com/metrics", json)
class PerformanceDashboard(private val profiler: KNETRequestProfiler) {
    /**
     * Builds a report map with a global summary plus the five slowest and
     * five most error-prone endpoints.
     */
    fun getReport(): Map<String, Any> {
        val globalStats = profiler.getStats()
        val endpointStats = profiler.getAllEndpointStats().values

        val summary = mapOf(
            "totalRequests" to globalStats.totalRequests,
            "avgLatency" to globalStats.averageLatencyMs,
            "p95Latency" to globalStats.p95LatencyMs,
            "slowRate" to globalStats.slowRate,
        )

        // Top 5 endpoints by mean latency, slowest first.
        val slowestEndpoints = endpointStats
            .sortedByDescending { it.averageLatencyMs }
            .take(5)
            .map { it.endpoint to it.averageLatencyMs }

        // Top 5 endpoints by error rate, restricted to those with at least one error.
        val errorProne = endpointStats
            .filter { it.errorCount > 0 }
            .sortedByDescending { it.errorRate }
            .take(5)
            .map { it.endpoint to it.errorRate }

        return mapOf(
            "summary" to summary,
            "slowestEndpoints" to slowestEndpoints,
            "errorProne" to errorProne,
        )
    }
}
class PerformanceGate(private val profiler: KNETRequestProfiler) {
    /**
     * Returns true when global performance is within the gate's limits
     * (slow rate <= 5% and P95 latency <= 2000 ms), printing the verdict.
     */
    fun checkPerformance(): Boolean {
        val stats = profiler.getStats()
        val failureMessage = when {
            // Too many requests exceeded the slow threshold.
            stats.slowRate > 5.0 -> "FAIL: Slow rate ${stats.slowRate}% > 5%"
            // Tail latency is too high.
            stats.p95LatencyMs > 2000 -> "FAIL: P95 ${stats.p95LatencyMs}ms > 2000ms"
            else -> null
        }
        if (failureMessage != null) {
            println(failureMessage)
            return false
        }
        println("PASS: Performance within limits")
        return true
    }
}
class AdaptiveTimeoutManager(private val profiler: KNETRequestProfiler) {
    /**
     * Derives a per-endpoint timeout (ms) from observed P95 latency:
     * P95 plus a 50% buffer, clamped to [5000, 60000]; 30000 when no
     * statistics exist for the endpoint yet.
     */
    fun getTimeoutForEndpoint(endpoint: String): Long {
        // No data yet for this endpoint: fall back to the default timeout.
        val stats = profiler.getEndpointStats(endpoint) ?: return 30000
        val buffered = (stats.p95LatencyMs * 1.5).toLong()
        return buffered.coerceIn(5000, 60000)
    }
}
### KNETRequestProfiler

| Metoda | Opis |
|---|---|
| interceptor() | Tworzy interceptor |
| getLastProfile() | Ostatni profil |
| getProfile(id) | Profil po ID |
| getAllProfiles() | Wszystkie profile |
| getSlowProfiles() | Wolne requesty |
| getErrorProfiles() | Requesty z błędami |
| getEndpointStats(endpoint) | Statystyki endpointu |
| getAllEndpointStats() | Wszystkie statystyki |
| getStats() | Globalne statystyki |
| exportToJson() | Export do JSON |
| clear() | Czyści profile |