Initial commit
commit 08afb6e761

@ -0,0 +1,54 @@
name: build

on:
  push:
    branches:
      - 'main'
    tags:
      - 'v*'
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      -
        name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.14
      -
        name: Cache Go modules
        uses: actions/cache@v1
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      # -
      #   name: Tests
      #   run: |
      #     go mod tidy
      #     go test -v ./...
      -
        name: Docker Login
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      -
        name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        if: success() && startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@ -0,0 +1,19 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
.idea/
.DS_Store

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

vendor/
bench_server/bench_server
plow*
dist/

@ -0,0 +1,32 @@
project_name: plow
builds:
  - env: [CGO_ENABLED=0]
    goos:
      - linux
      - windows
      - darwin
    goarch:
      - amd64
      - arm64
dockers:
  - image_templates: ["ghcr.io/six-ddc/plow:{{ .Version }}"]
    dockerfile: Dockerfile
    build_flag_templates:
      - --label=org.opencontainers.image.title={{ .ProjectName }}
      - --label=org.opencontainers.image.description={{ .ProjectName }}
      - --label=org.opencontainers.image.url=https://github.com/six-ddc/plow
      - --label=org.opencontainers.image.source=https://github.com/six-ddc/plow
      - --label=org.opencontainers.image.version={{ .Version }}
      - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }}
      - --label=org.opencontainers.image.revision={{ .FullCommit }}
      - --label=org.opencontainers.image.licenses=Apache-2.0
nfpms:
  - maintainer: six-ddc@github
    description: A high-performance HTTP benchmarking tool with real-time web UI and terminal displaying.
    homepage: https://github.com/six-ddc/plow
    license: Apache-2.0
    formats:
      - deb
      - rpm
      - apk

@ -0,0 +1,3 @@
FROM scratch
COPY plow /usr/bin/plow
ENTRYPOINT ["/usr/bin/plow"]

@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

@ -0,0 +1,5 @@
# plow
A high-performance HTTP benchmarking tool with real-time web UI and terminal displaying

![](https://github.com/six-ddc/plow/blob/main/demo.gif?raw=true)

@ -0,0 +1,27 @@
package main

import (
    "flag"
    "github.com/valyala/fasthttp"
    "log"
    "math/rand"
    "strconv"
)

var serverPort = flag.Int("p", 8080, "port to use for benchmarks")

func main() {
    flag.Parse()
    addr := "localhost:" + strconv.Itoa(*serverPort)
    log.Println("Starting HTTP server on:", addr)
    log.Fatalln(fasthttp.ListenAndServe(addr, func(c *fasthttp.RequestCtx) {
        //time.Sleep(time.Duration(rand.Int63n(int64(5 * time.Second))))
        if rand.Intn(5) == 0 {
            c.SetStatusCode(400)
        }
        _, werr := c.Write(c.Request.Body())
        if werr != nil {
            log.Println(werr)
        }
    }))
}

@ -0,0 +1,221 @@
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    cors "github.com/AdhityaRamadhanus/fasthttpcors"
    "github.com/go-echarts/go-echarts/v2/charts"
    "github.com/go-echarts/go-echarts/v2/components"
    "github.com/go-echarts/go-echarts/v2/opts"
    "github.com/go-echarts/go-echarts/v2/templates"
    "github.com/valyala/fasthttp"
    "net"
    "strings"
    "sync"
    "text/template"
    "time"
)

func init() {
    templates.PageTpl = `
{{- define "page" }}
<!DOCTYPE html>
<html>
    {{- template "header" . }}
<body>
<p align="center">🚀 <a href="https://github.com/six-ddc/plow"><b>plow</b></a> <em>is a high-performance HTTP benchmarking tool with real-time web UI and terminal displaying</em></p>
<style> .box { justify-content:center; display:flex; flex-wrap:wrap } </style>
<div class="box"> {{- range .Charts }} {{ template "base" . }} {{- end }} </div>
</body>
</html>
{{ end }}
`
}

var (
    assertsPath     = "/echarts/statics/"
    apiPath         = "/data"
    latencyView     = "latency"
    rpsView         = "rps"
    timeFormat      = "15:04:05"
    refreshInterval = time.Second
)

const (
    DefaultTemplate string = `
$(function () { setInterval({{ .ViewID }}_sync, {{ .Interval }}); });
function {{ .ViewID }}_sync() {
    $.ajax({
        type: "GET",
        url: "http://{{ .Addr }}{{ .ApiPath }}/{{ .Route }}",
        dataType: "json",
        success: function (result) {
            let opt = goecharts_{{ .ViewID }}.getOption();
            let x = opt.xAxis[0].data;
            x.push(result.time);
            opt.xAxis[0].data = x;
            for (let i = 0; i < result.values.length; i++) {
                let y = opt.series[i].data;
                y.push({ value: result.values[i] });
                opt.series[i].data = y;
                goecharts_{{ .ViewID }}.setOption(opt);
            }
        }
    });
}`
)

func (c *Charts) genViewTemplate(vid, route string) string {
    tpl, err := template.New("view").Parse(DefaultTemplate)
    if err != nil {
        panic("failed to parse template " + err.Error())
    }

    var d = struct {
        Interval int
        Addr     string
        ApiPath  string
        Route    string
        ViewID   string
    }{
        Interval: int(refreshInterval.Milliseconds()),
        Addr:     c.linkAddr,
        ApiPath:  apiPath,
        Route:    route,
        ViewID:   vid,
    }

    buf := bytes.Buffer{}
    if err := tpl.Execute(&buf, d); err != nil {
        panic("statsview: failed to execute template " + err.Error())
    }

    return buf.String()
}

func (c *Charts) newBasicView(route string) *charts.Line {
    graph := charts.NewLine()
    graph.SetGlobalOptions(
        charts.WithTooltipOpts(opts.Tooltip{Show: true, Trigger: "axis"}),
        charts.WithXAxisOpts(opts.XAxis{Name: "Time"}),
        charts.WithInitializationOpts(opts.Initialization{
            Width:  "700px",
            Height: "400px",
        }),
        charts.WithDataZoomOpts(opts.DataZoom{
            Type:       "slider",
            XAxisIndex: []int{0},
        }),
    )
    graph.SetXAxis([]string{}).SetSeriesOptions(charts.WithLineChartOpts(opts.LineChart{Smooth: true}))
    graph.AddJSFuncs(c.genViewTemplate(graph.ChartID, route))
    return graph
}

func (c *Charts) newLatencyView() components.Charter {
    graph := c.newBasicView(latencyView)
    graph.SetGlobalOptions(
        charts.WithTitleOpts(opts.Title{Title: "Latency"}),
        charts.WithYAxisOpts(opts.YAxis{Scale: true, AxisLabel: &opts.AxisLabel{Formatter: "{value} ms"}}),
        charts.WithLegendOpts(opts.Legend{Show: true, Selected: map[string]bool{"Min": false, "Max": false}}),
    )
    graph.AddSeries("Min", []opts.LineData{}).
        AddSeries("Mean", []opts.LineData{}).
        AddSeries("Max", []opts.LineData{})
    return graph
}

func (c *Charts) newRPSView() components.Charter {
    graph := c.newBasicView(rpsView)
    graph.SetGlobalOptions(
        charts.WithTitleOpts(opts.Title{Title: "Reqs/sec"}),
        charts.WithYAxisOpts(opts.YAxis{Scale: true}),
    )
    graph.AddSeries("RPS", []opts.LineData{})
    return graph
}

type Metrics struct {
    Values []float64 `json:"values"`
    Time   string    `json:"time"`
}

type Charts struct {
    listenAddr string
    linkAddr   string
    page       *components.Page
    ln         net.Listener
    lock       sync.Mutex
    reportData ChartsReport
    dataFunc   func() *ChartsReport
}

func NewCharts(listenAddr string, linkAddr string, dataFunc func() *ChartsReport) (*Charts, error) {
    ln, err := net.Listen("tcp4", listenAddr)
    if err != nil {
        return nil, err
    }
    c := &Charts{listenAddr: listenAddr, linkAddr: linkAddr, ln: ln, dataFunc: dataFunc}

    c.page = components.NewPage()
    c.page.PageTitle = "plow"
    c.page.AssetsHost = fmt.Sprintf("http://%s%s", linkAddr, assertsPath)
    c.page.Assets.JSAssets.Add("jquery.min.js")
    c.page.AddCharts(c.newLatencyView(), c.newRPSView())

    return c, nil
}

func (c *Charts) Handler(ctx *fasthttp.RequestCtx) {
    path := string(ctx.Path())
    switch path {
    case assertsPath + "echarts.min.js":
        ctx.WriteString(EchartJS)
    case assertsPath + "jquery.min.js":
        ctx.WriteString(JqueryJS)
    case "/":
        ctx.SetContentType("text/html")
        c.page.Render(ctx)
    default:
        if strings.HasPrefix(path, apiPath) {
            view := path[len(apiPath)+1:]
            var values []float64
            c.lock.Lock()
            switch view {
            case latencyView:
                values = append(values, c.dataFunc().Latency.min/1e6)
                values = append(values, c.dataFunc().Latency.Mean()/1e6)
                values = append(values, c.dataFunc().Latency.max/1e6)
            case rpsView:
                values = append(values, c.dataFunc().RPS)
            }
            c.lock.Unlock()
            metrics := &Metrics{
                Time:   time.Now().Format(timeFormat),
                Values: values,
            }
            json.NewEncoder(ctx).Encode(metrics)
        } else {
            ctx.Error("NotFound", fasthttp.StatusNotFound)
        }
    }
}

func (c *Charts) Serve() {
    go func() {
        ticker := time.NewTicker(refreshInterval)
        for {
            select {
            case <-ticker.C:
                c.lock.Lock()
                c.reportData = *c.dataFunc()
                c.lock.Unlock()
            }
        }
    }()
    server := fasthttp.Server{
        Handler: cors.DefaultHandler().CorsMiddleware(c.Handler),
    }
    server.Serve(c.ln)
}

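Editor's note: the chart page above polls the JSON endpoint under apiPath once per refreshInterval and appends the returned values to each series. A minimal, hedged sketch of a client for that endpoint follows; it assumes plow is running with the default --listen address 127.0.0.1:18888 (see main.go below) and simply mirrors the Metrics struct. It is an illustration, not part of the commit.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// metrics mirrors the Metrics struct served by Charts.Handler.
type metrics struct {
    Values []float64 `json:"values"`
    Time   string    `json:"time"`
}

func main() {
    // Poll the latency series once; "/data" and "latency" match apiPath and latencyView above.
    resp, err := http.Get("http://127.0.0.1:18888/data/latency")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var m metrics
    if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
        panic(err)
    }
    // Values holds min/mean/max latency in milliseconds; Time is formatted as 15:04:05.
    fmt.Println(m.Time, m.Values)
}
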
@ -0,0 +1,21 @@
module github.com/six-ddc/plow

go 1.14

require (
    github.com/AdhityaRamadhanus/fasthttpcors v0.0.0-20170121111917-d4c07198763a
    github.com/BurntSushi/toml v0.3.1 // indirect
    github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 // indirect
    github.com/andybalholm/brotli v1.0.3 // indirect
    github.com/beorn7/perks v1.0.1
    github.com/go-echarts/go-echarts/v2 v2.2.4
    github.com/klauspost/compress v1.13.0 // indirect
    github.com/mattn/go-isatty v0.0.13
    github.com/mattn/go-runewidth v0.0.13
    github.com/nicksnyder/go-i18n v1.10.1 // indirect
    github.com/valyala/fasthttp v1.26.0
    go.uber.org/automaxprocs v1.4.0
    golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
    gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780
    gopkg.in/yaml.v2 v2.4.0 // indirect
)

@ -0,0 +1,75 @@
github.com/AdhityaRamadhanus/fasthttpcors v0.0.0-20170121111917-d4c07198763a h1:XVdatQFSP2YhJGjqLLIfW8QBk4loz/SCe/PxkXDiW+s=
github.com/AdhityaRamadhanus/fasthttpcors v0.0.0-20170121111917-d4c07198763a/go.mod h1:C0A1KeiVHs+trY6gUTPhhGammbrZ30ZfXRW/nuT7HLw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4=
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/andybalholm/brotli v1.0.2 h1:JKnhI/XQ75uFBTiuzXpzFrUriDPiZjlOSzh6wXogP0E=
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.3 h1:fpcw+r1N1h0Poc1F/pHbW40cUm/lMEQslZtCkBQ0UnM=
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-echarts/go-echarts/v2 v2.2.4 h1:SKJpdyNIyD65XjbUZjzg6SwccTNXEgmh+PlaO23g2H0=
github.com/go-echarts/go-echarts/v2 v2.2.4/go.mod h1:6TOomEztzGDVDkOSCFBq3ed7xOYfbOqhaBzD0YV771A=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs=
github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/nicksnyder/go-i18n v1.10.1 h1:isfg77E/aCD7+0lD/D00ebR2MV5vgeQ276WYyDaCRQc=
github.com/nicksnyder/go-i18n v1.10.1/go.mod h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.26.0 h1:k5Tooi31zPG/g8yS6o2RffRO2C9B9Kah9SY8j/S7058=
github.com/valyala/fasthttp v1.26.0/go.mod h1:cmWIqlu99AO/RKcp1HWaViTqc57FswJOfYYdPJBl8BA=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780 h1:CEBpW6C191eozfEuWdUmIAHn7lwlLxJ7HVdr2e2Tsrw=
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@ -0,0 +1,128 @@
package main

import (
    "fmt"
    "gopkg.in/alecthomas/kingpin.v3-unstable"
    "io/ioutil"
    "os"
    "strings"
)

var (
    concurrency = kingpin.Flag("concurrency", "Number of connections to run concurrently").Short('c').Default("1").Int()
    requests    = kingpin.Flag("requests", "Number of requests to run").Short('n').Default("-1").Int64()
    duration    = kingpin.Flag("duration", "Duration of test, examples: -d 10s -d 3m").Short('d').PlaceHolder("DURATION").Duration()
    interval    = kingpin.Flag("interval", "Print snapshot result every interval, use 0 to print once at the end").Short('i').Default("200ms").Duration()
    seconds     = kingpin.Flag("seconds", "Use seconds as time unit to print").Bool()

    body        = kingpin.Flag("body", "HTTP request body, if start the body with @, the rest should be a filename to read").String()
    stream      = kingpin.Flag("stream", "Specify whether to stream file specified by '--body @file' using chunked encoding or to read into memory").Default("false").Bool()
    method      = kingpin.Flag("method", "HTTP method").Default("GET").Short('m').String()
    headers     = kingpin.Flag("header", "Custom HTTP headers").Short('H').PlaceHolder("K:V").Strings()
    host        = kingpin.Flag("host", "Host header").String()
    contentType = kingpin.Flag("content", "Content-Type header").Short('T').String()

    chartsListenAddr = kingpin.Flag("listen", "Listen addr to serve Web UI").Default("127.0.0.1:18888").String()
    chartsLinkAddr   = kingpin.Flag("link", "Link addr used for show Web html and request backend server").Default("127.0.0.1:18888").String()
    timeout          = kingpin.Flag("timeout", "Timeout for each http request").PlaceHolder("DURATION").Duration()
    dialTimeout      = kingpin.Flag("dial-timeout", "Timeout for dial addr").PlaceHolder("DURATION").Duration()
    reqWriteTimeout  = kingpin.Flag("req-timeout", "Timeout for full request writing").PlaceHolder("DURATION").Duration()
    respReadTimeout  = kingpin.Flag("resp-timeout", "Timeout for full response reading").PlaceHolder("DURATION").Duration()
    socks5           = kingpin.Flag("socks5", "Socks5 proxy").PlaceHolder("ip:port").String()

    url = kingpin.Arg("url", "request url").Required().String()
)

func errAndExit(msg string) {
    fmt.Fprintln(os.Stderr, msg)
    os.Exit(1)
}

func main() {
    kingpin.UsageTemplate(kingpin.CompactUsageTemplate).Version("1.0.0").Author("six-ddc@github")
    kingpin.CommandLine.Help = `A high-performance HTTP benchmarking tool with real-time web UI and terminal displaying

Example:
  plow http://127.0.0.1:8080/ -c 20 -n 100000
  plow https://httpbin.org/post -c 20 -d 5m --body @file.json -T 'application/json' -m POST
`
    kingpin.Parse()
    if *requests >= 0 && *requests < int64(*concurrency) {
        errAndExit("requests must be greater than or equal to concurrency")
        return
    }

    var err error
    var bodyBytes []byte
    var bodyFile string
    if strings.HasPrefix(*body, "@") {
        fileName := (*body)[1:]
        if _, err = os.Stat(fileName); err != nil {
            errAndExit(err.Error())
            return
        }
        if *stream {
            bodyFile = fileName
        } else {
            bodyBytes, err = ioutil.ReadFile(fileName)
            if err != nil {
                errAndExit(err.Error())
                return
            }
        }
    } else if *body != "" {
        bodyBytes = []byte(*body)
    }

    clientOpt := ClientOpt{
        url:       *url,
        method:    *method,
        headers:   *headers,
        bodyBytes: bodyBytes,
        bodyFile:  bodyFile,
        maxConns:  *concurrency,
        doTimeout: *timeout,

        readTimeout:  *respReadTimeout,
        writeTimeout: *reqWriteTimeout,
        dialTimeout:  *dialTimeout,

        socks5Proxy: *socks5,
        contentType: *contentType,
        host:        *host,
    }

    fmt.Printf("Benchmarking %s", *url)
    if *requests > 0 {
        fmt.Printf(" with %d request(s)", *requests)
    }
    if *duration > 0 {
        fmt.Printf(" for %s", duration.String())
    }
    fmt.Printf(" using %d connection(s)\n", *concurrency)
    if *chartsListenAddr != "" {
        fmt.Printf("> Real-time charts is listening on http://%s/\n", *chartsLinkAddr)
    }
    fmt.Printf("\n")

    requester, err := NewRequester(*concurrency, *requests, *duration, &clientOpt)
    if err != nil {
        errAndExit(err.Error())
        return
    }
    go requester.Run()

    report := NewStreamReport()
    go report.Collect(requester.RecordChan())

    if *chartsListenAddr != "" {
        charts, err := NewCharts(*chartsListenAddr, *chartsLinkAddr, report.Charts)
        if err != nil {
            errAndExit(err.Error())
            return
        }
        go charts.Serve()
    }

    printer := NewPrinter(*requests, *duration)
    printer.PrintLoop(report.Snapshot, *interval, *seconds, report.Done())
}

@ -0,0 +1,353 @@
package main

import (
    "bytes"
    "fmt"
    "github.com/mattn/go-isatty"
    "github.com/mattn/go-runewidth"
    "math"
    "os"
    "regexp"
    "sort"
    "strconv"
    "strings"
    "time"
)

var (
    maxBarLen  = 40
    barStart   = "|"
    barBody    = "■"
    barEnd     = "|"
    barSpinner = []string{"|", "/", "-", "\\"}
    clearLine  = []byte("\r\033[K")
    isTerminal = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsTerminal(os.Stdout.Fd())
)

type Printer struct {
    maxNum      int64
    maxDuration time.Duration
    curNum      int64
    curDuration time.Duration
    pbInc       int64
    pbNumStr    string
    pbDurStr    string
}

func NewPrinter(maxNum int64, maxDuration time.Duration) *Printer {
    return &Printer{maxNum: maxNum, maxDuration: maxDuration}
}

func (p *Printer) updateProgressValue(rs *SnapshotReport) {
    p.pbInc += 1
    if p.maxDuration > 0 {
        n := rs.Elapsed
        if n > p.maxDuration {
            n = p.maxDuration
        }
        p.curDuration = n
        barLen := int((p.curDuration*time.Duration(maxBarLen-2) + p.maxDuration/2) / p.maxDuration)
        p.pbDurStr = barStart + strings.Repeat(barBody, barLen) + strings.Repeat(" ", maxBarLen-2-barLen) + barEnd
    }
    if p.maxNum > 0 {
        p.curNum = rs.Count
        if p.maxNum > 0 {
            barLen := int((p.curNum*int64(maxBarLen-2) + p.maxNum/2) / p.maxNum)
            p.pbNumStr = barStart + strings.Repeat(barBody, barLen) + strings.Repeat(" ", maxBarLen-2-barLen) + barEnd
        } else {
            idx := p.pbInc % int64(len(barSpinner))
            p.pbNumStr = barSpinner[int(idx)]
        }
    }
}

func (p *Printer) PrintLoop(snapshot func() *SnapshotReport, interval time.Duration, useSeconds bool, doneChan <-chan struct{}) {
    var buf bytes.Buffer

    var backCursor string
    echo := func(isFinal bool) {
        report := snapshot()
        p.updateProgressValue(report)
        os.Stdout.WriteString(backCursor)
        buf.Reset()
        p.formatTableReports(&buf, report, isFinal, useSeconds)
        result := buf.Bytes()
        n := 0
        for {
            i := bytes.IndexByte(result, '\n')
            if i == -1 {
                os.Stdout.Write(clearLine)
                os.Stdout.Write(result)
                break
            }
            n++
            os.Stdout.Write(clearLine)
            os.Stdout.Write(result[:i])
            os.Stdout.Write([]byte("\n"))
            result = result[i+1:]
        }
        os.Stdout.Sync()
        backCursor = fmt.Sprintf("\033[%dA", n)
    }

    if interval > 0 {
        ticker := time.NewTicker(interval)
    loop:
        for {
            select {
            case <-ticker.C:
                echo(false)
            case <-doneChan:
                ticker.Stop()
                break loop
            }
        }
    } else {
        <-doneChan
    }
    echo(true)
}

const (
    FgBlackColor int = iota + 30
    FgRedColor
    FgGreenColor
    FgYellowColor
    FgBlueColor
    FgMagentaColor
    FgCyanColor
    FgWhiteColor
)

func colorize(s string, seq int) string {
    if !isTerminal {
        return s
    }
    return fmt.Sprintf("\033[%dm%s\033[0m", seq, s)
}

func durationToString(d time.Duration, useSeconds bool) string {
    d = d.Truncate(time.Microsecond)
    if useSeconds {
        return formatFloat64(d.Seconds())
    }
    return d.String()
}

func alignBulk(bulk [][]string, aligns ...int) {
    maxLen := map[int]int{}
    for _, b := range bulk {
        for i, bb := range b {
            lbb := displayWidth(bb)
            if maxLen[i] < lbb {
                maxLen[i] = lbb
            }
        }
    }
    for _, b := range bulk {
        for i, ali := range aligns {
            if len(b) >= i+1 {
                if i == len(aligns)-1 && ali == ALIGN_LEFT {
                    continue
                }
                b[i] = padString(b[i], " ", maxLen[i], ali)
            }
        }
    }
}

func writeBulkWith(writer *bytes.Buffer, bulk [][]string, lineStart, sep, lineEnd string) {
    for _, b := range bulk {
        writer.WriteString(lineStart)
        writer.WriteString(b[0])
        for _, bb := range b[1:] {
            writer.WriteString(sep)
            writer.WriteString(bb)
        }
        writer.WriteString(lineEnd)
    }
}

func writeBulk(writer *bytes.Buffer, bulk [][]string) {
    writeBulkWith(writer, bulk, " ", " ", "\n")
}

func formatFloat64(f float64) string {
    return strconv.FormatFloat(f, 'f', -1, 64)
}

func (p *Printer) formatTableReports(writer *bytes.Buffer, snapshot *SnapshotReport, isFinal bool, useSeconds bool) {
    summaryBulk := p.buildSummary(snapshot, isFinal)
    errorsBulks := p.buildErrors(snapshot)
    statsBulk := p.buildStats(snapshot, useSeconds)
    percBulk := p.buildPercentile(snapshot, useSeconds)
    hisBulk := p.buildHistogram(snapshot, useSeconds, isFinal)

    writer.WriteString("Summary:\n")
    writeBulk(writer, summaryBulk)
    writer.WriteString("\n")

    if errorsBulks != nil {
        writer.WriteString("Error:\n")
        writeBulk(writer, errorsBulks)
        writer.WriteString("\n")
    }

    writeBulkWith(writer, statsBulk, "", " ", "\n")
    writer.WriteString("\n")

    writer.WriteString("Latency Percentile:\n")
    writeBulk(writer, percBulk)
    writer.WriteString("\n")

    writer.WriteString("Latency Histogram:\n")
    writeBulk(writer, hisBulk)
}

func (p *Printer) buildHistogram(snapshot *SnapshotReport, useSeconds bool, isFinal bool) [][]string {
    hisBulk := make([][]string, 0, 8)
    maxCount := 0
    hisSum := 0
    for _, bin := range snapshot.Histograms {
        if maxCount < bin.Count {
            maxCount = bin.Count
        }
        hisSum += bin.Count
    }
    for _, bin := range snapshot.Histograms {
        row := []string{durationToString(bin.Mean, useSeconds), strconv.Itoa(bin.Count)}
        if isFinal {
            row = append(row, fmt.Sprintf("%.2f%%", math.Floor(float64(bin.Count)*1e4/float64(hisSum)+0.5)/100.0))
        } else {
            barLen := 0
            if maxCount > 0 {
                barLen = (bin.Count*maxBarLen + maxCount/2) / maxCount
            }
            row = append(row, strings.Repeat(barBody, barLen))
        }
        hisBulk = append(hisBulk, row)
    }
    if isFinal {
        alignBulk(hisBulk, ALIGN_LEFT, ALIGN_RIGHT, ALIGN_RIGHT)
    } else {
        alignBulk(hisBulk, ALIGN_LEFT, ALIGN_RIGHT, ALIGN_LEFT)
    }
    return hisBulk
}

func (p *Printer) buildPercentile(snapshot *SnapshotReport, useSeconds bool) [][]string {
    percBulk := make([][]string, 2)
    percAligns := make([]int, 0, len(snapshot.Percentiles))
    for _, percentile := range snapshot.Percentiles {
        perc := formatFloat64(percentile.Percentile * 100)
        percBulk[0] = append(percBulk[0], "P"+perc)
        percBulk[1] = append(percBulk[1], durationToString(percentile.Latency, useSeconds))
        percAligns = append(percAligns, ALIGN_CENTER)
    }
    percAligns[0] = ALIGN_LEFT
    alignBulk(percBulk, percAligns...)
    return percBulk
}

func (p *Printer) buildStats(snapshot *SnapshotReport, useSeconds bool) [][]string {
    var statsBulk [][]string
    statsBulk = append(statsBulk,
        []string{"Statistics", "Min", "Mean", "StdDev", "Max"},
        []string{
            "  Latency",
            durationToString(snapshot.Stats.Min, useSeconds),
            durationToString(snapshot.Stats.Mean, useSeconds),
            durationToString(snapshot.Stats.StdDev, useSeconds),
            durationToString(snapshot.Stats.Max, useSeconds),
        },
    )
    if snapshot.RpsStats != nil {
        statsBulk = append(statsBulk,
            []string{
                "  RPS",
                formatFloat64(math.Trunc(snapshot.RpsStats.Min*100) / 100.0),
                formatFloat64(math.Trunc(snapshot.RpsStats.Mean*100) / 100.0),
                formatFloat64(math.Trunc(snapshot.RpsStats.StdDev*100) / 100.0),
                formatFloat64(math.Trunc(snapshot.RpsStats.Max*100) / 100.0),
            },
        )
    }
    alignBulk(statsBulk, ALIGN_LEFT, ALIGN_CENTER, ALIGN_CENTER, ALIGN_CENTER, ALIGN_CENTER)
    return statsBulk
}

func (p *Printer) buildErrors(snapshot *SnapshotReport) [][]string {
    var errorsBulks [][]string
    for k, v := range snapshot.Errors {
        vs := colorize(strconv.FormatInt(v, 10), FgRedColor)
        errorsBulks = append(errorsBulks, []string{vs, "\"" + k + "\""})
    }
    if errorsBulks != nil {
        sort.Slice(errorsBulks, func(i, j int) bool { return errorsBulks[i][1] < errorsBulks[j][1] })
    }
    alignBulk(errorsBulks, ALIGN_LEFT, ALIGN_LEFT)
    return errorsBulks
}

func (p *Printer) buildSummary(snapshot *SnapshotReport, isFinal bool) [][]string {
    summarybulk := make([][]string, 0, 8)
    elapsedLine := []string{"Elapsed", snapshot.Elapsed.Truncate(time.Millisecond).String()}
    if p.maxDuration > 0 && !isFinal {
        elapsedLine = append(elapsedLine, p.pbDurStr)
    }
    countLine := []string{"Count", strconv.FormatInt(snapshot.Count, 10)}
    if p.maxNum > 0 && !isFinal {
        countLine = append(countLine, p.pbNumStr)
    }
    summarybulk = append(
        summarybulk,
        elapsedLine,
        countLine,
    )

    codesBulks := make([][]string, 0, len(snapshot.Codes))
    for k, v := range snapshot.Codes {
        vs := strconv.FormatInt(v, 10)
        if k != "2xx" {
            vs = colorize(vs, FgMagentaColor)
        }
        codesBulks = append(codesBulks, []string{"  " + k, vs})
    }
    sort.Slice(codesBulks, func(i, j int) bool { return codesBulks[i][0] < codesBulks[j][0] })
    summarybulk = append(summarybulk, codesBulks...)
    summarybulk = append(summarybulk,
        []string{"RPS", fmt.Sprintf("%.3f", snapshot.RPS)},
        []string{"Reads", fmt.Sprintf("%.3fMB/s", snapshot.ReadThroughput)},
        []string{"Writes", fmt.Sprintf("%.3fMB/s", snapshot.WriteThroughput)},
    )
    alignBulk(summarybulk, ALIGN_LEFT, ALIGN_RIGHT)
    return summarybulk
}

var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]")

func displayWidth(str string) int {
    return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, ""))
}

const (
    ALIGN_LEFT = iota
    ALIGN_RIGHT
    ALIGN_CENTER
)

func padString(s, pad string, width int, align int) string {
    gap := width - displayWidth(s)
    if gap > 0 {
        if align == ALIGN_LEFT {
            return s + strings.Repeat(pad, gap)
        } else if align == ALIGN_RIGHT {
            return strings.Repeat(pad, gap) + s
        } else if align == ALIGN_CENTER {
            gapLeft := int(math.Ceil(float64(gap / 2)))
            gapRight := gap - gapLeft
            return strings.Repeat(pad, gapLeft) + s + strings.Repeat(pad, gapRight)
        }
    }
    return s
}

@ -0,0 +1,280 @@
package main

import (
    "github.com/beorn7/perks/histogram"
    "github.com/beorn7/perks/quantile"
    "math"
    "sync"
    "time"
)

var quantiles = []float64{0.50, 0.75, 0.90, 0.95, 0.99, 0.999, 0.9999}

var quantilesTarget = map[float64]float64{
    0.50:   0.01,
    0.75:   0.01,
    0.90:   0.001,
    0.95:   0.001,
    0.99:   0.001,
    0.999:  0.0001,
    0.9999: 0.00001,
}

type Stats struct {
    count int64
    sum   float64
    sumSq float64
    min   float64
    max   float64
}

func (s *Stats) Update(v float64) {
    s.count++
    s.sum += v
    s.sumSq += v * v
    if v < s.min || s.count == 1 {
        s.min = v
    }
    if v > s.max || s.count == 1 {
        s.max = v
    }
}

func (s *Stats) Stddev() float64 {
    num := (float64(s.count) * s.sumSq) - math.Pow(s.sum, 2)
    div := float64(s.count * (s.count - 1))
    if div == 0 {
        return 0
    }
    return math.Sqrt(num / div)
}

func (s *Stats) Mean() float64 {
    if s.count == 0 {
        return 0
    }
    return s.sum / float64(s.count)
}

func (s *Stats) Reset() {
    s.count = 0
    s.sum = 0
    s.sumSq = 0
    s.min = 0
    s.max = 0
}

type StreamReport struct {
    lock sync.Mutex

    latencyStats     *Stats
    rpsStats         *Stats
    latencyQuantile  *quantile.Stream
    latencyHistogram *histogram.Histogram
    codes            map[string]int64
    errors           map[string]int64

    latencyWithinSec *Stats
    rpsWithinSec     float64

    readBytes  int64
    writeBytes int64

    doneChan chan struct{}
}

func NewStreamReport() *StreamReport {
    return &StreamReport{
        latencyQuantile:  quantile.NewTargeted(quantilesTarget),
        latencyHistogram: histogram.New(8),
        codes:            make(map[string]int64, 1),
        errors:           make(map[string]int64, 1),
        doneChan:         make(chan struct{}, 1),
        latencyStats:     &Stats{},
        rpsStats:         &Stats{},
        latencyWithinSec: &Stats{},
    }
}

func (s *StreamReport) insert(v float64) {
    s.latencyQuantile.Insert(v)
    s.latencyHistogram.Insert(v)

    s.latencyStats.Update(v)
}

func (s *StreamReport) percentiles() ([]float64, []float64) {
    result := make([]float64, len(quantiles))
    for i, f := range quantiles {
        result[i] = s.latencyQuantile.Query(f)
    }
    return quantiles, result
}

func (s *StreamReport) Collect(records <-chan *ReportRecord) {
    latencyWithinSecTemp := &Stats{}
    go func() {
        ticker := time.NewTicker(time.Second)
        lastCount := int64(0)
        lastTime := startTime
        for {
            select {
            case <-ticker.C:
                s.lock.Lock()
                dc := s.latencyStats.count - lastCount
                if dc > 0 {
                    rps := float64(dc) / time.Since(lastTime).Seconds()
                    s.rpsStats.Update(rps)
                    lastCount = s.latencyStats.count
                    lastTime = time.Now()

                    *s.latencyWithinSec = *latencyWithinSecTemp
                    s.rpsWithinSec = rps
                    latencyWithinSecTemp.Reset()
                }
                s.lock.Unlock()
            case <-s.doneChan:
                return
            }
        }
    }()

    for {
        r, ok := <-records
        if !ok {
            close(s.doneChan)
            break
        }
        s.lock.Lock()
        latencyWithinSecTemp.Update(float64(r.cost))
        s.insert(float64(r.cost))
        if r.code != "" {
            s.codes[r.code] += 1
        }
        if r.error != "" {
            s.errors[r.error] += 1
        }
        s.readBytes = r.readBytes
        s.writeBytes = r.writeBytes
        s.lock.Unlock()
        recordPool.Put(r)
    }
}

type SnapshotReport struct {
    Elapsed         time.Duration
    Count           int64
    Codes           map[string]int64
    Errors          map[string]int64
    RPS             float64
    ReadThroughput  float64
    WriteThroughput float64

    Stats *struct {
        Min    time.Duration
        Mean   time.Duration
        StdDev time.Duration
        Max    time.Duration
    }

    RpsStats *struct {
        Min    float64
        Mean   float64
        StdDev float64
        Max    float64
    }

    Percentiles []*struct {
        Percentile float64
        Latency    time.Duration
    }

    Histograms []*struct {
        Mean  time.Duration
        Count int
    }
}

func (s *StreamReport) Snapshot() *SnapshotReport {
    s.lock.Lock()

    rs := &SnapshotReport{
        Elapsed: time.Since(startTime),
        Count:   s.latencyStats.count,
        Stats: &struct {
            Min    time.Duration
            Mean   time.Duration
            StdDev time.Duration
            Max    time.Duration
        }{time.Duration(s.latencyStats.min), time.Duration(s.latencyStats.Mean()),
            time.Duration(s.latencyStats.Stddev()), time.Duration(s.latencyStats.max)},
    }
    if s.rpsStats.count > 0 {
        rs.RpsStats = &struct {
            Min    float64
            Mean   float64
            StdDev float64
            Max    float64
        }{s.rpsStats.min, s.rpsStats.Mean(),
            s.rpsStats.Stddev(), s.rpsStats.max}
    }

    elapseInSec := rs.Elapsed.Seconds()
    rs.RPS = float64(rs.Count) / elapseInSec
    rs.ReadThroughput = float64(s.readBytes) / 1024.0 / 1024.0 / elapseInSec
    rs.WriteThroughput = float64(s.writeBytes) / 1024.0 / 1024.0 / elapseInSec

    rs.Codes = make(map[string]int64, len(s.codes))
    for k, v := range s.codes {
        rs.Codes[k] = v
    }
    rs.Errors = make(map[string]int64, len(s.errors))
    for k, v := range s.errors {
        rs.Errors[k] = v
    }

    rs.Percentiles = make([]*struct {
        Percentile float64
        Latency    time.Duration
    }, len(quantiles))
    for i, p := range quantiles {
        rs.Percentiles[i] = &struct {
            Percentile float64
            Latency    time.Duration
        }{p, time.Duration(s.latencyQuantile.Query(p))}
    }

    hisBins := s.latencyHistogram.Bins()
    rs.Histograms = make([]*struct {
        Mean  time.Duration
        Count int
    }, len(hisBins))
    for i, b := range hisBins {
        rs.Histograms[i] = &struct {
            Mean  time.Duration
            Count int
        }{time.Duration(b.Mean()), b.Count}
    }

    s.lock.Unlock()
    return rs
}

func (s *StreamReport) Done() <-chan struct{} {
    return s.doneChan
}

type ChartsReport struct {
    RPS     float64
    Latency Stats
}

func (s *StreamReport) Charts() *ChartsReport {
    s.lock.Lock()
    cr := &ChartsReport{
        RPS:     s.rpsWithinSec,
        Latency: *s.latencyWithinSec,
    }
    s.lock.Unlock()
    return cr
}

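Editor's note: Stats above keeps only count, sum, sum of squares, min and max, so Mean and Stddev (sample standard deviation) can be derived without storing individual samples. A hedged illustration follows as a hypothetical test in the same package (not part of the commit), checking those accumulators against hand-computed values for 1, 2, 3, 4.

package main

import (
    "math"
    "testing"
)

// TestStatsMeanStddev is a hypothetical sketch exercising the sum/sumSq
// bookkeeping used by Stats.Update, Mean and Stddev.
func TestStatsMeanStddev(t *testing.T) {
    var s Stats
    for _, v := range []float64{1, 2, 3, 4} {
        s.Update(v)
    }
    if s.min != 1 || s.max != 4 {
        t.Fatalf("min/max = %v/%v", s.min, s.max)
    }
    if s.Mean() != 2.5 {
        t.Fatalf("mean = %v", s.Mean())
    }
    // Sample standard deviation of 1,2,3,4 is sqrt(5/3).
    if math.Abs(s.Stddev()-math.Sqrt(5.0/3.0)) > 1e-9 {
        t.Fatalf("stddev = %v", s.Stddev())
    }
}
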
@ -0,0 +1,355 @@
package main

import (
    "context"
    "fmt"
    "github.com/valyala/fasthttp"
    "github.com/valyala/fasthttp/fasthttpproxy"
    "go.uber.org/automaxprocs/maxprocs"
    "io/ioutil"
    "net"
    "net/http"
    _ "net/http/pprof"
    url2 "net/url"
    "os"
    "os/signal"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "syscall"
    "time"
)

var (
    startTime = time.Now()
)

type ReportRecord struct {
    cost       time.Duration
    code       string
    error      string
    readBytes  int64
    writeBytes int64
}

var recordPool = sync.Pool{
    New: func() interface{} { return new(ReportRecord) },
}

func init() {
    go func() {
        http.ListenAndServe("0.0.0.0:6060", nil)
    }()
    _, _ = maxprocs.Set()
}

type MyConn struct {
    net.Conn
    r, w *int64
}

func NewMyConn(conn net.Conn, r, w *int64) (*MyConn, error) {
    myConn := &MyConn{Conn: conn, r: r, w: w}
    return myConn, nil
}

func (c *MyConn) Read(b []byte) (n int, err error) {
    sz, err := c.Conn.Read(b)

    if err == nil {
        atomic.AddInt64(c.r, int64(sz))
    }
    return sz, err
}

func (c *MyConn) Write(b []byte) (n int, err error) {
    sz, err := c.Conn.Write(b)

    if err == nil {
        atomic.AddInt64(c.w, int64(sz))
    }
    return sz, err
}

func ThroughputInterceptorDial(dial fasthttp.DialFunc, r *int64, w *int64) fasthttp.DialFunc {
    return func(addr string) (net.Conn, error) {
        conn, err := dial(addr)
        if err != nil {
            return nil, err
        }
        return NewMyConn(conn, r, w)
    }
}

type Requester struct {
    concurrency int
    requests    int64
    duration    time.Duration
    clientOpt   *ClientOpt
    httpClient  *fasthttp.HostClient
    httpHeader  *fasthttp.RequestHeader

    recordChan chan *ReportRecord
    report     *StreamReport
    errCount   int64
    wg         sync.WaitGroup

    readBytes  int64
    writeBytes int64

    cancel func()
}

type ClientOpt struct {
    url       string
    method    string
    headers   []string
    bodyBytes []byte
    bodyFile  string

    maxConns     int
    doTimeout    time.Duration
    readTimeout  time.Duration
    writeTimeout time.Duration
    dialTimeout  time.Duration

    socks5Proxy string
    contentType string
    host        string
}

func NewRequester(concurrency int, requests int64, duration time.Duration, clientOpt *ClientOpt) (*Requester, error) {
    maxResult := concurrency * 100
    if maxResult > 8192 {
        maxResult = 8192
    }
    r := &Requester{
        concurrency: concurrency,
        requests:    requests,
        duration:    duration,
        clientOpt:   clientOpt,
        recordChan:  make(chan *ReportRecord, maxResult),
    }
    client, header, err := buildRequestClient(clientOpt, &r.readBytes, &r.writeBytes)
    if err != nil {
        return nil, err
    }
    r.httpClient = client
    r.httpHeader = header
    return r, nil
}

func addMissingPort(addr string, isTLS bool) string {
    n := strings.Index(addr, ":")
    if n >= 0 {
        return addr
    }
    port := 80
    if isTLS {
        port = 443
    }
    return net.JoinHostPort(addr, strconv.Itoa(port))
}

func buildRequestClient(opt *ClientOpt, r *int64, w *int64) (*fasthttp.HostClient, *fasthttp.RequestHeader, error) {
    u, err := url2.Parse(opt.url)
    if err != nil {
        return nil, nil, err
    }
    httpClient := &fasthttp.HostClient{
        Addr:                          addMissingPort(u.Host, u.Scheme == "https"),
        IsTLS:                         u.Scheme == "https",
        Name:                          "plow",
        MaxConns:                      opt.maxConns,
        ReadTimeout:                   opt.readTimeout,
        WriteTimeout:                  opt.writeTimeout,
        DisableHeaderNamesNormalizing: true,
    }
    if opt.socks5Proxy != "" {
        if strings.Index(opt.socks5Proxy, "://") == -1 {
            opt.socks5Proxy = "socks5://" + opt.socks5Proxy
        }
        httpClient.Dial = fasthttpproxy.FasthttpSocksDialer(opt.socks5Proxy)
    } else {
        httpClient.Dial = fasthttpproxy.FasthttpProxyHTTPDialerTimeout(opt.dialTimeout)
    }
    httpClient.Dial = ThroughputInterceptorDial(httpClient.Dial, r, w)

    var requestHeader fasthttp.RequestHeader
    if opt.contentType != "" {
        requestHeader.SetContentType(opt.contentType)
    }
    if opt.host != "" {
        requestHeader.SetHost(opt.host)
    } else {
        requestHeader.SetHost(u.Host)
    }
    requestHeader.SetMethod(opt.method)
    requestHeader.SetRequestURI(u.RequestURI())
    for _, h := range opt.headers {
        n := strings.SplitN(h, ":", 2)
        if len(n) != 2 {
            return nil, nil, fmt.Errorf("invalid header: %s", h)
        }
        requestHeader.Set(n[0], n[1])
    }

    return httpClient, &requestHeader, nil
}

func (r *Requester) Cancel() {
    r.cancel()
}

func (r *Requester) RecordChan() <-chan *ReportRecord {
    return r.recordChan
}

func getErrorType(err error) string {
    switch err {
    case fasthttp.ErrTimeout:
        return "Timeout"
    case fasthttp.ErrNoFreeConns:
        return "NoFreeConns"
    case fasthttp.ErrConnectionClosed:
        return "ConnClosed"
    case fasthttp.ErrDialTimeout:
        return "DialTimeout"
    default:
        if opErr, ok := err.(*net.OpError); ok {
            err = opErr.Err
        }
        switch t := err.(type) {
        case *net.DNSError:
            return "DNS"
        case *os.SyscallError:
            if errno, ok := t.Err.(syscall.Errno); ok {
                switch errno {
                case syscall.ECONNREFUSED:
                    return "ConnRefused"
                case syscall.ETIMEDOUT:
                    return "Timeout"
                case syscall.EADDRNOTAVAIL:
                    return "AddrNotAvail"
                }
            }
        }
    }
    return "Unknown"
}

func (r *Requester) DoRequest(req *fasthttp.Request, resp *fasthttp.Response, rr *ReportRecord) {
    t1 := time.Since(startTime)
    var err error
    if r.clientOpt.doTimeout > 0 {
        err = r.httpClient.DoTimeout(req, resp, r.clientOpt.doTimeout)
    } else {
        err = r.httpClient.Do(req, resp)
    }
    var code string

    if err != nil {
        rr.cost = time.Since(startTime) - t1
        rr.code = ""
        rr.error = err.Error()
        return
    } else {
        switch resp.StatusCode() / 100 {
        case 1:
            code = "1xx"
        case 2:
            code = "2xx"
        case 3:
            code = "3xx"
        case 4:
            code = "4xx"
        case 5:
            code = "5xx"
        }
        err = resp.BodyWriteTo(ioutil.Discard)
        if err != nil {
            rr.cost = time.Since(startTime) - t1
            rr.code = ""
            rr.error = err.Error()
            return
        }
    }

    rr.cost = time.Since(startTime) - t1
    rr.code = code
    rr.error = ""
}

func (r *Requester) Run() {
    // handle ctrl-c
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
    defer signal.Stop(sigs)

    ctx, cancelFunc := context.WithCancel(context.Background())
    r.cancel = cancelFunc
    go func() {
        <-sigs
        cancelFunc()
    }()
    if r.duration > 0 {
        time.AfterFunc(r.duration, func() {
            cancelFunc()
        })
    }

    startTime = time.Now()
    semaphore := r.requests
    for i := 0; i < r.concurrency; i++ {
        r.wg.Add(1)
        go func() {
            defer r.wg.Done()
            req := &fasthttp.Request{}
            resp := &fasthttp.Response{}
            r.httpHeader.CopyTo(&req.Header)
            if r.httpClient.IsTLS {
                req.URI().SetScheme("https")
                req.URI().SetHostBytes(req.Header.Host())
            }

            for {
                select {
                case <-ctx.Done():
                    return
                default:
                }

                if r.requests > 0 && atomic.AddInt64(&semaphore, -1) < 0 {
                    cancelFunc()
                    return
                }

                if r.clientOpt.bodyFile != "" {
                    file, err := os.Open(r.clientOpt.bodyFile)
                    if err != nil {
                        rr := recordPool.Get().(*ReportRecord)
                        rr.cost = 0
                        rr.error = err.Error()
                        rr.readBytes = atomic.LoadInt64(&r.readBytes)
                        rr.writeBytes = atomic.LoadInt64(&r.writeBytes)
                        r.recordChan <- rr
                        continue
                    }
                    req.SetBodyStream(file, -1)
                } else {
                    req.SetBodyRaw(r.clientOpt.bodyBytes)
                }
                resp.Reset()
                rr := recordPool.Get().(*ReportRecord)
                r.DoRequest(req, resp, rr)
                rr.readBytes = atomic.LoadInt64(&r.readBytes)
                rr.writeBytes = atomic.LoadInt64(&r.writeBytes)
                r.recordChan <- rr
            }
        }()
    }

    r.wg.Wait()
    close(r.recordChan)
}

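Editor's note: ThroughputInterceptorDial above wraps a fasthttp.DialFunc so every connection's reads and writes are added to two shared counters, which is how buildRequestClient tracks throughput. A hedged, standalone sketch of that wiring follows (not part of the commit); the address 127.0.0.1:8080 matches the bundled bench_server default and is only illustrative.

package main

import (
    "fmt"
    "sync/atomic"

    "github.com/valyala/fasthttp"
)

func main() {
    var readBytes, writeBytes int64
    // Wrap the stock fasthttp.Dial so the counters see every byte on the wire.
    client := &fasthttp.HostClient{
        Addr: "127.0.0.1:8080",
        Dial: ThroughputInterceptorDial(fasthttp.Dial, &readBytes, &writeBytes),
    }
    status, body, err := client.Get(nil, "http://127.0.0.1:8080/")
    if err != nil {
        panic(err)
    }
    fmt.Println(status, len(body),
        atomic.LoadInt64(&readBytes), atomic.LoadInt64(&writeBytes))
}
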
File diff suppressed because one or more lines are too long