feat(export): 增强导出功能并优化用户体验

- 新增导出任务SQL分析功能,支持查看生成SQL
- 优化导出任务列表展示,增加评估状态和进度显示
- 实现大文件分片导出和自动压缩功能
- 添加字段中文标签映射,提升导出文件可读性
- 改进XLSX导出逻辑,支持多sheet操作
- 增加导出任务自动轮询更新状态功能
- 修复导出进度计算问题,优化性能监控
This commit is contained in:
zhouyonggao 2025-11-25 17:05:01 +08:00
parent 6fa4abdcf5
commit 950fa758e1
28 changed files with 4433 additions and 109 deletions

View File

@ -1,6 +1,7 @@
package api
import (
"archive/zip"
"database/sql"
"encoding/json"
"fmt"
@ -8,6 +9,8 @@ import (
"log"
"marketing-system-data-tool/server/internal/exporter"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -33,6 +36,11 @@ func ExportsHandler(meta, marketing *sql.DB) http.Handler {
if strings.HasPrefix(p, "/") {
id := strings.TrimPrefix(p, "/")
if r.Method == http.MethodGet && !strings.HasSuffix(p, "/download") {
if strings.HasSuffix(p, "/sql") {
id = strings.TrimSuffix(id, "/sql")
api.getSQL(w, r, id)
return
}
api.get(w, r, id)
return
}
@ -102,9 +110,35 @@ func (a *ExportsAPI) create(w http.ResponseWriter, r *http.Request) {
return
}
var estimate int64
func() {
idx := strings.Index(q, " FROM ")
if idx > 0 {
cq := "SELECT COUNT(1)" + q[idx:]
row := dataDB.QueryRow(cq, args...)
var cnt int64
if err := row.Scan(&cnt); err == nil {
estimate = cnt
return
}
}
for _, r := range expRows {
if r.Table.Valid && r.Table.String == "order" && r.Rows.Valid { estimate = r.Rows.Int64; break }
if r.Rows.Valid { estimate += r.Rows.Int64 }
if r.Table.Valid && r.Table.String == "order" && r.Rows.Valid {
estimate = r.Rows.Int64
break
}
if r.Rows.Valid {
estimate += r.Rows.Int64
}
}
}()
labels := fieldLabels()
hdrs := make([]string, len(fs))
for i, tf := range fs {
if v, ok := labels[tf]; ok {
hdrs[i] = v
} else {
hdrs[i] = tf
}
}
ejSQL := "INSERT INTO export_jobs (template_id, status, requested_by, permission_scope_json, filters_json, options_json, explain_json, explain_score, row_estimate, file_format, created_at, updated_at) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)"
ejArgs := []interface{}{p.TemplateID, "queued", p.RequestedBy, toJSON(p.Permission), toJSON(p.Filters), toJSON(p.Options), toJSON(expRows), score, estimate, p.FileFormat, time.Now(), time.Now()}
@ -115,7 +149,7 @@ func (a *ExportsAPI) create(w http.ResponseWriter, r *http.Request) {
return
}
id, _ := res.LastInsertId()
go a.runJob(uint64(id), dataDB, q, args, fs, p.FileFormat)
go a.runJob(uint64(id), dataDB, q, args, hdrs, p.FileFormat)
ok(w, r, map[string]interface{}{"id": id})
}
@ -129,7 +163,286 @@ func (a *ExportsAPI) runJob(id uint64, db *sql.DB, q string, args []interface{},
return
}
w.WriteHeader(cols)
const maxRowsPerFile = 300000
files := []string{}
{
var tplID uint64
var filtersJSON []byte
row := a.meta.QueryRow("SELECT template_id, filters_json FROM export_jobs WHERE id=?", id)
_ = row.Scan(&tplID, &filtersJSON)
var main string
var fieldsJSON []byte
tr := a.meta.QueryRow("SELECT main_table, fields_json FROM export_templates WHERE id=?", tplID)
_ = tr.Scan(&main, &fieldsJSON)
var fs []string
var fl map[string]interface{}
json.Unmarshal(fieldsJSON, &fs)
json.Unmarshal(filtersJSON, &fl)
wl := whitelist()
var chunks [][2]string
if v, ok := fl["create_time_between"]; ok {
if arr, ok2 := v.([]interface{}); ok2 && len(arr) == 2 {
chunks = splitByDays(toString(arr[0]), toString(arr[1]), 10)
}
if arrs, ok3 := v.([]string); ok3 && len(arrs) == 2 {
chunks = splitByDays(arrs[0], arrs[1], 10)
}
}
if len(chunks) > 0 {
out := make([]interface{}, len(cols))
dest := make([]interface{}, len(cols))
for i := range out {
dest[i] = &out[i]
}
var count int64
var partCount int64
var tick int64
for _, rg := range chunks {
fl["create_time_between"] = []string{rg[0], rg[1]}
req := exporter.BuildRequest{MainTable: main, Fields: fs, Filters: fl}
cq, cargs, err := exporter.BuildSQL(req, wl)
if err != nil {
continue
}
batch := 1000
for off := 0; ; off += batch {
sub := "SELECT * FROM (" + cq + ") AS sub LIMIT ? OFFSET ?"
args2 := append(append([]interface{}{}, cargs...), batch, off)
rows2, err := db.Query(sub, args2...)
if err != nil {
break
}
fetched := false
for rows2.Next() {
fetched = true
if err := rows2.Scan(dest...); err != nil {
rows2.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id?", "failed", time.Now(), id)
return
}
vals := make([]string, len(cols))
for i := range out {
if b, ok := out[i].([]byte); ok {
vals[i] = string(b)
} else if out[i] == nil {
vals[i] = ""
} else {
vals[i] = toString(out[i])
}
}
w.WriteRow(vals)
count++
partCount++
tick++
if tick%50 == 0 {
a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id)
}
if partCount >= maxRowsPerFile {
path, size, _ := w.Close()
files = append(files, path)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, path, partCount, size, time.Now(), time.Now())
w, err = exporter.NewCSVWriter("storage", "export")
if err != nil {
rows2.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
w.WriteHeader(cols)
partCount = 0
}
}
rows2.Close()
if !fetched {
break
}
}
}
path, size, _ := w.Close()
if partCount > 0 || len(files) == 0 {
files = append(files, path)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, path, partCount, size, time.Now(), time.Now())
}
if count == 0 {
row := db.QueryRow("SELECT COUNT(1) FROM ("+q+") AS sub", args...)
var c int64
_ = row.Scan(&c)
count = c
}
if len(files) >= 1 {
zipPath, zipSize := createZip(id, files)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, zipPath, count, zipSize, time.Now(), time.Now())
}
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=?, total_rows=?, updated_at=? WHERE id= ?", "completed", time.Now(), count, time.Now(), id)
return
}
}
log.Printf("job_id=%d sql=%s args=%v", id, q, args)
// batched cursor queries, split workbook per 300k rows
{
const maxRowsPerFile = 300000
out := make([]interface{}, len(cols))
dest := make([]interface{}, len(cols))
for i := range out {
dest[i] = &out[i]
}
var count int64
var partCount int64
var tick int64
batch := 1000
files2 := []string{}
for off := 0; ; off += batch {
sub := "SELECT * FROM (" + q + ") AS sub LIMIT ? OFFSET ?"
args2 := append(append([]interface{}{}, args...), batch, off)
rows3, err := db.Query(sub, args2...)
if err != nil {
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
fetched := false
for rows3.Next() {
fetched = true
if err := rows3.Scan(dest...); err != nil {
rows3.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id?", "failed", time.Now(), id)
return
}
vals := make([]string, len(cols))
for i := range out {
if b, ok := out[i].([]byte); ok {
vals[i] = string(b)
} else if out[i] == nil {
vals[i] = ""
} else {
vals[i] = toString(out[i])
}
}
w.WriteRow(vals)
count++
partCount++
tick++
if tick%50 == 0 {
a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id)
}
if partCount >= maxRowsPerFile {
path2, size2, _ := w.Close()
files2 = append(files2, path2)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, path2, partCount, size2, time.Now(), time.Now())
w, err = exporter.NewCSVWriter("storage", "export")
if err != nil {
rows3.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
w.WriteHeader(cols)
partCount = 0
}
}
rows3.Close()
if !fetched {
break
}
}
path, size, _ := w.Close()
if partCount > 0 || len(files2) == 0 {
files2 = append(files2, path)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, path, partCount, size, time.Now(), time.Now())
}
if count == 0 {
row := db.QueryRow("SELECT COUNT(1) FROM ("+q+") AS sub", args...)
var c int64
_ = row.Scan(&c)
count = c
}
if len(files2) >= 1 {
zipPath, zipSize := createZip(id, files2)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, zipPath, count, zipSize, time.Now(), time.Now())
}
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=?, total_rows=?, updated_at=? WHERE id= ?", "completed", time.Now(), count, time.Now(), id)
return
}
// batched cursor queries, 1000 rows per page, file split at 300k
{
const maxRowsPerFile = 300000
files2 := []string{}
out := make([]interface{}, len(cols))
dest := make([]interface{}, len(cols))
for i := range out {
dest[i] = &out[i]
}
var count int64
var partCount int64
var tick int64
batch := 1000
for off := 0; ; off += batch {
sub := "SELECT * FROM (" + q + ") AS sub LIMIT ? OFFSET ?"
args2 := append(append([]interface{}{}, args...), batch, off)
rows3, err := db.Query(sub, args2...)
if err != nil {
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
fetched := false
for rows3.Next() {
fetched = true
if err := rows3.Scan(dest...); err != nil {
rows3.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id?", "failed", time.Now(), id)
return
}
vals := make([]string, len(cols))
for i := range out {
if b, ok := out[i].([]byte); ok {
vals[i] = string(b)
} else if out[i] == nil {
vals[i] = ""
} else {
vals[i] = toString(out[i])
}
}
w.WriteRow(vals)
count++
partCount++
tick++
if tick%50 == 0 {
a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id)
}
if partCount >= maxRowsPerFile {
path, size, _ := w.Close()
files2 = append(files2, path)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, path, partCount, size, time.Now(), time.Now())
w, err = exporter.NewCSVWriter("storage", "export")
if err != nil {
rows3.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
w.WriteHeader(cols)
partCount = 0
}
}
rows3.Close()
if !fetched {
break
}
}
path, size, _ := w.Close()
if partCount > 0 || len(files2) == 0 {
files2 = append(files2, path)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, path, partCount, size, time.Now(), time.Now())
}
if count == 0 {
row := db.QueryRow("SELECT COUNT(1) FROM ("+q+") AS sub", args...)
var c int64
_ = row.Scan(&c)
count = c
}
if len(files2) >= 1 {
zipPath, zipSize := createZip(id, files2)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, zipPath, count, zipSize, time.Now(), time.Now())
}
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=?, total_rows=?, updated_at=? WHERE id= ?", "completed", time.Now(), count, time.Now(), id)
return
}
rows, err := db.Query(q, args...)
if err != nil {
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
@ -161,7 +474,9 @@ func (a *ExportsAPI) runJob(id uint64, db *sql.DB, q string, args []interface{},
w.WriteRow(vals)
count++
tick++
if tick%500 == 0 { a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id) }
if tick%50 == 0 {
a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id)
}
}
path, size, _ := w.Close()
log.Printf("job_id=%d sql=%s args=%v", id, "INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", []interface{}{id, path, count, size, time.Now(), time.Now()})
@ -171,12 +486,125 @@ func (a *ExportsAPI) runJob(id uint64, db *sql.DB, q string, args []interface{},
return
}
if fmt == "xlsx" {
const maxRowsPerFile = 300000
files := []string{}
x, path, err := exporter.NewXLSXWriter("storage", "export", "Sheet1")
if err != nil {
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
x.WriteHeader(cols)
{
var tplID uint64
var filtersJSON []byte
row := a.meta.QueryRow("SELECT template_id, filters_json FROM export_jobs WHERE id=?", id)
_ = row.Scan(&tplID, &filtersJSON)
var main string
var fieldsJSON []byte
tr := a.meta.QueryRow("SELECT main_table, fields_json FROM export_templates WHERE id=?", tplID)
_ = tr.Scan(&main, &fieldsJSON)
var fs []string
var fl map[string]interface{}
json.Unmarshal(fieldsJSON, &fs)
json.Unmarshal(filtersJSON, &fl)
wl := whitelist()
var chunks [][2]string
if v, ok := fl["create_time_between"]; ok {
if arr, ok2 := v.([]interface{}); ok2 && len(arr) == 2 {
chunks = splitByDays(toString(arr[0]), toString(arr[1]), 10)
}
if arrs, ok3 := v.([]string); ok3 && len(arrs) == 2 {
chunks = splitByDays(arrs[0], arrs[1], 10)
}
}
if len(chunks) > 0 {
out := make([]interface{}, len(cols))
dest := make([]interface{}, len(cols))
for i := range out {
dest[i] = &out[i]
}
var count int64
var partCount int64
var tick int64
for _, rg := range chunks {
fl["create_time_between"] = []string{rg[0], rg[1]}
req := exporter.BuildRequest{MainTable: main, Fields: fs, Filters: fl}
cq, cargs, err := exporter.BuildSQL(req, wl)
if err != nil {
continue
}
batch := 1000
for off := 0; ; off += batch {
sub := "SELECT * FROM (" + cq + ") AS sub LIMIT ? OFFSET ?"
args2 := append(append([]interface{}{}, cargs...), batch, off)
rows2, err := db.Query(sub, args2...)
if err != nil {
break
}
fetched := false
for rows2.Next() {
fetched = true
if err := rows2.Scan(dest...); err != nil {
rows2.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id?", "failed", time.Now(), id)
return
}
vals := make([]string, len(cols))
for i := range out {
if b, ok := out[i].([]byte); ok {
vals[i] = string(b)
} else if out[i] == nil {
vals[i] = ""
} else {
vals[i] = toString(out[i])
}
}
x.WriteRow(vals)
count++
partCount++
tick++
if tick%50 == 0 {
a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id)
}
if partCount >= maxRowsPerFile {
p, size, _ := x.Close(path)
files = append(files, p)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, p, partCount, size, time.Now(), time.Now())
x, path, err = exporter.NewXLSXWriter("storage", "export", "Sheet1")
if err != nil {
rows2.Close()
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id= ?", "failed", time.Now(), id)
return
}
x.WriteHeader(cols)
partCount = 0
}
}
rows2.Close()
if !fetched {
break
}
}
}
p, size, _ := x.Close(path)
if partCount > 0 || len(files) == 0 {
files = append(files, p)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, p, partCount, size, time.Now(), time.Now())
}
if count == 0 {
row := db.QueryRow("SELECT COUNT(1) FROM ("+q+") AS sub", args...)
var c int64
_ = row.Scan(&c)
count = c
}
if len(files) > 1 {
zipPath, zipSize := createZip(id, files)
a.meta.Exec("INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", id, zipPath, count, zipSize, time.Now(), time.Now())
}
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=?, total_rows=?, updated_at=? WHERE id= ?", "completed", time.Now(), count, time.Now(), id)
return
}
}
log.Printf("job_id=%d sql=%s args=%v", id, q, args)
rows, err := db.Query(q, args...)
if err != nil {
@ -190,6 +618,7 @@ func (a *ExportsAPI) runJob(id uint64, db *sql.DB, q string, args []interface{},
dest[i] = &out[i]
}
var count int64
var tick int64
for rows.Next() {
if err := rows.Scan(dest...); err != nil {
a.meta.Exec("UPDATE export_jobs SET status=?, finished_at=? WHERE id=?", "failed", time.Now(), id)
@ -207,6 +636,10 @@ func (a *ExportsAPI) runJob(id uint64, db *sql.DB, q string, args []interface{},
}
x.WriteRow(vals)
count++
tick++
if tick%50 == 0 {
a.meta.Exec("UPDATE export_jobs SET total_rows=?, updated_at=? WHERE id= ?", count, time.Now(), id)
}
}
p, size, _ := x.Close(path)
log.Printf("job_id=%d sql=%s args=%v", id, "INSERT INTO export_job_files (job_id, storage_uri, row_count, size_bytes, created_at, updated_at) VALUES (?,?,?,?,?,?)", []interface{}{id, p, count, size, time.Now(), time.Now()})
@ -225,6 +658,28 @@ func (a *ExportsAPI) selectDataDB(ds string) *sql.DB {
return a.marketing
}
// splitByDays partitions the inclusive time span [startStr, endStr] into
// consecutive sub-ranges of at most stepDays days each, formatted with the
// standard "2006-01-02 15:04:05" layout. When either bound fails to parse,
// the span is empty/inverted, or stepDays is non-positive, the (trimmed)
// original pair is returned as a single range so callers can fall back to
// an unchunked query.
func splitByDays(startStr, endStr string, stepDays int) [][2]string {
	const layout = "2006-01-02 15:04:05"
	begin := strings.TrimSpace(startStr)
	finish := strings.TrimSpace(endStr)
	from, errFrom := time.Parse(layout, begin)
	to, errTo := time.Parse(layout, finish)
	if errFrom != nil || errTo != nil || !to.After(from) || stepDays <= 0 {
		return [][2]string{{begin, finish}}
	}
	span := time.Duration(stepDays) * 24 * time.Hour
	var ranges [][2]string
	for lo := from; lo.Before(to); {
		hi := lo.Add(span)
		if hi.After(to) {
			hi = to // clamp the final chunk to the requested end
		}
		ranges = append(ranges, [2]string{lo.Format(layout), hi.Format(layout)})
		lo = hi
	}
	return ranges
}
func (a *ExportsAPI) get(w http.ResponseWriter, r *http.Request, id string) {
row := a.meta.QueryRow("SELECT id, template_id, status, requested_by, total_rows, file_format, started_at, finished_at, created_at, updated_at FROM export_jobs WHERE id=?", id)
var m = map[string]interface{}{}
@ -264,6 +719,70 @@ func (a *ExportsAPI) get(w http.ResponseWriter, r *http.Request, id string) {
ok(w, r, m)
}
// getSQL rebuilds the SQL statement for export job id from its stored
// template and filters, and responds with both the parameterized statement
// ("sql") and a best-effort literal-inlined form ("final_sql") for debugging.
// Responds 404 when the job is missing and 400 when the template is missing
// or SQL generation fails.
func (a *ExportsAPI) getSQL(w http.ResponseWriter, r *http.Request, id string) {
	var templateID uint64
	var filtersRaw []byte
	jobRow := a.meta.QueryRow("SELECT template_id, filters_json FROM export_jobs WHERE id=?", id)
	if err := jobRow.Scan(&templateID, &filtersRaw); err != nil {
		fail(w, r, http.StatusNotFound, "not found")
		return
	}
	var mainTable string
	var fieldsRaw []byte
	tplRow := a.meta.QueryRow("SELECT main_table, fields_json FROM export_templates WHERE id=?", templateID)
	if err := tplRow.Scan(&mainTable, &fieldsRaw); err != nil {
		fail(w, r, http.StatusBadRequest, "template not found")
		return
	}
	var fieldList []string
	var filterMap map[string]interface{}
	json.Unmarshal(fieldsRaw, &fieldList)
	json.Unmarshal(filtersRaw, &filterMap)
	stmt, params, err := exporter.BuildSQL(
		exporter.BuildRequest{MainTable: mainTable, Fields: fieldList, Filters: filterMap},
		whitelist(),
	)
	if err != nil {
		fail(w, r, http.StatusBadRequest, err.Error())
		return
	}
	// quote renders one bind parameter as a SQL literal; single quotes are
	// escaped by doubling.
	quote := func(v interface{}) string {
		switch t := v.(type) {
		case nil:
			return "NULL"
		case []byte:
			return "'" + strings.ReplaceAll(string(t), "'", "''") + "'"
		case string:
			return "'" + strings.ReplaceAll(t, "'", "''") + "'"
		case int:
			return strconv.Itoa(t)
		case int64:
			return strconv.FormatInt(t, 10)
		case float64:
			return strconv.FormatFloat(t, 'f', -1, 64)
		case time.Time:
			return "'" + t.Format("2006-01-02 15:04:05") + "'"
		default:
			return fmt.Sprintf("%v", t)
		}
	}
	// Substitute each '?' placeholder with its rendered argument, left to
	// right; extra placeholders (if any) are left untouched.
	var rendered strings.Builder
	next := 0
	for i := 0; i < len(stmt); i++ {
		if stmt[i] == '?' && next < len(params) {
			rendered.WriteString(quote(params[next]))
			next++
			continue
		}
		rendered.WriteByte(stmt[i])
	}
	ok(w, r, map[string]interface{}{"sql": stmt, "final_sql": rendered.String()})
}
func (a *ExportsAPI) download(w http.ResponseWriter, r *http.Request, id string) {
row := a.meta.QueryRow("SELECT storage_uri FROM export_job_files WHERE job_id=? ORDER BY id DESC LIMIT 1", id)
var uri string
@ -292,8 +811,15 @@ func toString(v interface{}) string {
return strconv.Itoa(t)
case float64:
return strconv.FormatFloat(t, 'f', -1, 64)
case bool:
if t {
return "1"
}
return "0"
case time.Time:
return t.Format("2006-01-02 15:04:05")
default:
return ""
return fmt.Sprintf("%v", t)
}
}
func (a *ExportsAPI) list(w http.ResponseWriter, r *http.Request) {
@ -301,15 +827,21 @@ func (a *ExportsAPI) list(w http.ResponseWriter, r *http.Request) {
page := 1
size := 15
if p := q.Get("page"); p != "" {
if n, err := strconv.Atoi(p); err == nil && n > 0 { page = n }
if n, err := strconv.Atoi(p); err == nil && n > 0 {
page = n
}
}
if s := q.Get("page_size"); s != "" {
if n, err := strconv.Atoi(s); err == nil && n > 0 && n <= 100 { size = n }
if n, err := strconv.Atoi(s); err == nil && n > 0 && n <= 100 {
size = n
}
}
tplIDStr := q.Get("template_id")
var tplID uint64
if tplIDStr != "" {
if n, err := strconv.ParseUint(tplIDStr, 10, 64); err == nil { tplID = n }
if n, err := strconv.ParseUint(tplIDStr, 10, 64); err == nil {
tplID = n
}
}
offset := (page - 1) * size
var totalCount int64
@ -323,9 +855,9 @@ func (a *ExportsAPI) list(w http.ResponseWriter, r *http.Request) {
var rows *sql.Rows
var err error
if tplID > 0 {
rows, err = a.meta.Query("SELECT id, template_id, status, requested_by, row_estimate, total_rows, file_format, created_at, updated_at, explain_score FROM export_jobs WHERE template_id = ? ORDER BY id DESC LIMIT ? OFFSET ?", tplID, size, offset)
rows, err = a.meta.Query("SELECT id, template_id, status, requested_by, row_estimate, total_rows, file_format, created_at, updated_at, explain_score, explain_json FROM export_jobs WHERE template_id = ? ORDER BY id DESC LIMIT ? OFFSET ?", tplID, size, offset)
} else {
rows, err = a.meta.Query("SELECT id, template_id, status, requested_by, row_estimate, total_rows, file_format, created_at, updated_at, explain_score FROM export_jobs ORDER BY id DESC LIMIT ? OFFSET ?", size, offset)
rows, err = a.meta.Query("SELECT id, template_id, status, requested_by, row_estimate, total_rows, file_format, created_at, updated_at, explain_score, explain_json FROM export_jobs ORDER BY id DESC LIMIT ? OFFSET ?", size, offset)
}
if err != nil {
fail(w, r, http.StatusInternalServerError, err.Error())
@ -339,12 +871,117 @@ func (a *ExportsAPI) list(w http.ResponseWriter, r *http.Request) {
var estimate, total sql.NullInt64
var createdAt, updatedAt sql.NullTime
var score sql.NullInt64
if err := rows.Scan(&id, &tid, &status, &req, &estimate, &total, &fmtstr, &createdAt, &updatedAt, &score); err != nil { continue }
var explainRaw sql.NullString
if err := rows.Scan(&id, &tid, &status, &req, &estimate, &total, &fmtstr, &createdAt, &updatedAt, &score, &explainRaw); err != nil {
continue
}
evalStatus := "通过"
if score.Int64 < 60 { evalStatus = "禁止" }
if score.Int64 < 60 {
evalStatus = "禁止"
}
desc := fmt.Sprintf("评分:%d估算行数:%d%s", score.Int64, estimate.Int64, map[bool]string{true: "允许执行", false: "禁止执行"}[score.Int64 >= 60])
if explainRaw.Valid && explainRaw.String != "" {
var arr []map[string]interface{}
if err := json.Unmarshal([]byte(explainRaw.String), &arr); err == nil {
segs := []string{}
for _, r := range arr {
getStr := func(field string) string {
if v, ok := r[field]; ok {
if mm, ok := v.(map[string]interface{}); ok {
if b, ok := mm["Valid"].(bool); ok && !b {
return ""
}
if s, ok := mm["String"].(string); ok {
return s
}
}
}
return ""
}
getInt := func(field string) int64 {
if v, ok := r[field]; ok {
if mm, ok := v.(map[string]interface{}); ok {
if b, ok := mm["Valid"].(bool); ok && !b {
return 0
}
if f, ok := mm["Int64"].(float64); ok {
return int64(f)
}
}
}
return 0
}
getFloat := func(field string) float64 {
if v, ok := r[field]; ok {
if mm, ok := v.(map[string]interface{}); ok {
if b, ok := mm["Valid"].(bool); ok && !b {
return 0
}
if f, ok := mm["Float64"].(float64); ok {
return f
}
}
}
return 0
}
tbl := getStr("Table")
typ := getStr("Type")
if typ == "" {
typ = getStr("SelectType")
}
key := getStr("Key")
rowsN := getInt("Rows")
filt := getFloat("Filtered")
extra := getStr("Extra")
if tbl == "" && typ == "" && rowsN == 0 && extra == "" {
continue
}
s := fmt.Sprintf("表:%s, 访问类型:%s, 预估行数:%d, 索引:%s, 过滤比例:%.1f%%", tbl, typ, rowsN, key, filt)
if extra != "" {
s += ", 额外:" + extra
}
segs = append(segs, s)
}
if len(segs) > 0 {
desc = strings.Join(segs, "")
}
}
}
m := map[string]interface{}{"id": id, "template_id": tid, "status": status, "requested_by": req, "row_estimate": estimate.Int64, "total_rows": total.Int64, "file_format": fmtstr, "created_at": createdAt.Time, "updated_at": updatedAt.Time, "eval_status": evalStatus, "eval_desc": desc}
items = append(items, m)
}
ok(w, r, map[string]interface{}{"items": items, "total": totalCount, "page": page, "page_size": size})
}
func createZip(jobID uint64, files []string) (string, int64) {
baseDir := "storage/export"
_ = os.MkdirAll(baseDir, 0755)
zipPath := filepath.Join(baseDir, fmt.Sprintf("job_%d_%d.zip", jobID, time.Now().Unix()))
zf, err := os.Create(zipPath)
if err != nil {
return zipPath, 0
}
defer zf.Close()
zw := zip.NewWriter(zf)
for _, p := range files {
f, err := os.Open(p)
if err != nil {
continue
}
fi, _ := f.Stat()
w, err := zw.Create(filepath.Base(p))
if err != nil {
f.Close()
continue
}
_, _ = io.Copy(w, f)
_ = fi
f.Close()
}
_ = zw.Close()
st, err := os.Stat(zipPath)
if err != nil {
return zipPath, 0
}
return zipPath, st.Size()
}

View File

@ -298,3 +298,86 @@ func whitelist() map[string]bool {
}
return m
}
// fieldLabels maps fully-qualified export field names ("table.column") to
// their Chinese display labels, used as column headers in exported files.
// Fields absent from this map fall back to the raw field name at the call
// site. Keep keys in sync with the export whitelist when adding fields.
func fieldLabels() map[string]string {
	return map[string]string{
		// order: main order record
		"order.order_number":   "订单编号",
		"order.creator":        "创建者ID",
		"order.out_trade_no":   "支付流水号",
		"order.type":           "订单类型",
		"order.status":         "订单状态",
		"order.contract_price": "合同单价",
		"order.num":            "数量",
		"order.total":          "总金额",
		"order.pay_amount":     "支付金额",
		"order.create_time":    "创建时间",
		"order.update_time":    "更新时间",
		// order_detail: per-order product/reseller details
		"order_detail.plan_title":     "计划标题",
		"order_detail.reseller_name":  "分销商名称",
		"order_detail.product_name":   "商品名称",
		"order_detail.show_url":       "商品图片URL",
		"order_detail.official_price": "官方价",
		"order_detail.cost_price":     "成本价",
		"order_detail.create_time":    "创建时间",
		"order_detail.update_time":    "更新时间",
		// order_cash: red-packet (cash) grants tied to an order
		"order_cash.channel":          "渠道",
		"order_cash.cash_activity_id": "红包批次号",
		"order_cash.receive_status":   "领取状态",
		"order_cash.receive_time":     "拆红包时间",
		"order_cash.cash_packet_id":   "红包ID",
		"order_cash.cash_id":          "红包规则ID",
		"order_cash.amount":           "红包额度",
		"order_cash.status":           "状态",
		"order_cash.expire_time":      "过期时间",
		"order_cash.update_time":      "更新时间",
		// order_voucher: channel voucher (立减金) grants tied to an order
		"order_voucher.channel":             "渠道",
		"order_voucher.channel_activity_id": "渠道立减金批次",
		"order_voucher.channel_voucher_id":  "渠道立减金ID",
		"order_voucher.status":              "状态",
		"order_voucher.grant_time":          "领取时间",
		"order_voucher.usage_time":          "核销时间",
		"order_voucher.refund_time":         "退款时间",
		"order_voucher.status_modify_time":  "状态更新时间",
		"order_voucher.overdue_time":        "过期时间",
		"order_voucher.refund_amount":       "退款金额",
		"order_voucher.official_price":      "官方价",
		"order_voucher.out_biz_no":          "外部业务号",
		"order_voucher.account_no":          "账户号",
		// plan: marketing plan
		"plan.id":         "计划ID",
		"plan.title":      "计划标题",
		"plan.status":     "状态",
		"plan.begin_time": "开始时间",
		"plan.end_time":   "结束时间",
		// key_batch: key (券码) issuance batch
		"key_batch.id":          "批次ID",
		"key_batch.batch_name":  "批次名称",
		"key_batch.bind_object": "绑定对象",
		"key_batch.quantity":    "发放数量",
		"key_batch.stock":       "剩余库存",
		"key_batch.begin_time":  "开始时间",
		"key_batch.end_time":    "结束时间",
		// code_batch: redemption code batch
		"code_batch.id":         "兑换批次ID",
		"code_batch.title":      "标题",
		"code_batch.status":     "状态",
		"code_batch.begin_time": "开始时间",
		"code_batch.end_time":   "结束时间",
		"code_batch.quantity":   "数量",
		"code_batch.usage":      "使用数",
		"code_batch.stock":      "库存",
		// voucher: voucher balance/denomination
		"voucher.channel":             "渠道",
		"voucher.channel_activity_id": "渠道批次号",
		"voucher.price":               "合同单价",
		"voucher.balance":             "剩余额度",
		"voucher.used_amount":         "已用额度",
		"voucher.denomination":        "面额",
		// voucher_batch: channel voucher batch metadata
		"voucher_batch.channel_activity_id": "渠道批次号",
		"voucher_batch.temp_no":             "模板编号",
		"voucher_batch.provider":            "服务商",
		"voucher_batch.weight":              "权重",
		// merchant_key_send: key delivery records per merchant
		"merchant_key_send.merchant_id": "商户ID",
		"merchant_key_send.out_biz_no":  "商户业务号",
		"merchant_key_send.key":         "券码",
		"merchant_key_send.status":      "状态",
		"merchant_key_send.usage_time":  "核销时间",
		"merchant_key_send.create_time": "创建时间",
	}
}

View File

@ -64,9 +64,16 @@ func NewXLSXWriter(dir, name, sheet string) (*XLSXWriter, string, error) {
os.MkdirAll(dir, 0755)
p := filepath.Join(dir, name+"_"+time.Now().Format("20060102150405")+".xlsx")
f := excelize.NewFile()
f.NewSheet(sheet)
idx, _ := f.GetSheetIndex(sheet)
idx, err := f.GetSheetIndex(sheet)
if err != nil || idx < 0 {
idx, _ = f.NewSheet(sheet)
f.SetActiveSheet(idx)
if sheet != "Sheet1" {
_ = f.DeleteSheet("Sheet1")
}
} else {
f.SetActiveSheet(idx)
}
return &XLSXWriter{f: f, sheet: sheet, row: 1}, p, nil
}
@ -108,7 +115,7 @@ func col(n int) string {
s := ""
for n > 0 {
n--
s = string('A'+(n%26)) + s
s = string(rune('A'+(n%26))) + s
n /= 26
}
return s

View File

@ -80,6 +80,7 @@ func Apply(db *sql.DB) error {
"ALTER TABLE export_jobs ADD COLUMN explain_json JSON",
"ALTER TABLE export_jobs ADD COLUMN explain_score INT",
"ALTER TABLE export_jobs ADD COLUMN filters_json JSON",
"ALTER TABLE export_job_files ADD COLUMN updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP",
}
for _, s := range optional {
if _, err := db.Exec(s); err != nil {

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -48,29 +48,44 @@
</el-card>
</el-col>
<el-col :span="24">
<el-card v-if="jobsVisible" :header="'导出任务(模板 '+ (jobsTplId||'') +''">
<el-table :data="jobs" size="small" stripe>
<el-dialog v-model="jobsVisible" :title="'导出任务(模板 '+ (jobsTplId||'') +''" width="1000px">
<el-table :data="jobs" size="small" stripe row-key="id">
<el-table-column prop="id" label="ID"></el-table-column>
<el-table-column prop="eval_status" label="校验状态"></el-table-column>
<el-table-column label="校验状态">
<template #default="scope">{{ scope.row.eval_status || '评估中' }}</template>
</el-table-column>
<el-table-column label="进度">
<template #default="scope">{{ jobPercent(scope.row) }}</template>
</el-table-column>
<el-table-column prop="eval_desc" label="评估描述"></el-table-column>
<el-table-column prop="total_rows" label="行数"></el-table-column>
<el-table-column prop="row_estimate" label="行数"></el-table-column>
<el-table-column prop="total_rows" label="已写行数"></el-table-column>
<el-table-column prop="file_format" label="格式"></el-table-column>
<el-table-column prop="created_at" label="创建时间"></el-table-column>
<el-table-column label="操作" width="140">
<el-table-column label="创建时间">
<template #default="scope">{{ fmtDT(new Date(scope.row.created_at)) }}</template>
</el-table-column>
<el-table-column label="操作" width="200">
<template #default="scope">
<el-button size="small" type="success" @click="download(scope.row.id)">下载</el-button>
<el-button size="small" @click="openSQL(scope.row.id)">分析</el-button>
<el-button v-if="scope.row.status==='completed' && Number(scope.row.total_rows)>0" size="small" type="success" @click="download(scope.row.id)">下载</el-button>
<el-button v-else-if="scope.row.status==='completed'" size="small" disabled>无数据</el-button>
</template>
</el-table-column>
</el-table>
<div style="display:flex;justify-content:space-between;margin-top:8px">
<div><el-button size="small" @click="closeJobs">关闭</el-button></div>
<div><el-pagination background layout="prev, pager, next, total" :total="jobsTotal" :page-size="jobsPageSize" :current-page="jobsPage" @current-change="(p)=>loadJobs(p)" /></div>
<div style="display:flex;justify-content:flex-end;margin-top:8px">
<el-pagination background layout="prev, pager, next, total" :total="jobsTotal" :page-size="jobsPageSize" v-model:currentPage="jobsPage" @current-change="loadJobs" />
</div>
<div v-if="!jobs || !jobs.length" style="padding:8px 0;color:#999">暂无任务</div>
</el-card>
<template #footer>
<el-button @click="closeJobs">关闭</el-button>
</template>
</el-dialog>
<el-dialog v-model="sqlVisible" title="生成SQL" width="800px">
<div style="max-height:50vh;overflow:auto"><pre style="white-space:pre-wrap">{{ sqlText }}</pre></div>
<template #footer>
<el-button @click="sqlVisible=false">关闭</el-button>
</template>
</el-dialog>
</el-col>
</el-row>
</el-main>

View File

@ -7,8 +7,10 @@ const { createApp, reactive } = Vue;
jobsVisible: false,
jobsTplId: null,
jobsPage: 1,
jobsPageSize: 15,
jobsPageSize: 10,
jobsTotal: 0,
sqlVisible: false,
sqlText: '',
job: {},
form: {
name: '',
@ -460,15 +462,27 @@ const { createApp, reactive } = Vue;
state.jobsPage = Number(payload.page || page)
}catch(_e){ state.jobs = [] }
}
const openJobs = (row)=>{ state.jobsTplId = row.id; state.jobsVisible = true; loadJobs(1) }
const closeJobs = ()=>{ state.jobsVisible = false }
let jobsPollTimer = null
const startJobsPolling = ()=>{
if(jobsPollTimer) return
jobsPollTimer = setInterval(()=>{ if(state.jobsVisible){ loadJobs(state.jobsPage) } }, 1000)
}
const stopJobsPolling = ()=>{ if(jobsPollTimer){ clearInterval(jobsPollTimer); jobsPollTimer=null } }
const openJobs = (row)=>{ state.jobsTplId = row.id; state.jobsVisible = true; loadJobs(1); startJobsPolling() }
const closeJobs = ()=>{ state.jobsVisible = false; stopJobsPolling() }
const jobPercent = (row)=>{
const est = Number(row.row_estimate || 0)
const done = Number(row.total_rows || 0)
if(row.status==='completed') return '100%'
if(row.status==='failed') return '失败'
if(row.status==='canceled') return '已取消'
if(row.status==='queued') return '0%'
if(est>0 && done>=0){ const p = Math.max(0, Math.min(100, Math.floor(done*100/est))); return p + '%' }
return row.status || ''
if(row.status==='running'){
if(est>0){ const p = Math.max(0, Math.min(100, Math.floor(done*100/est))); return p + '%' }
return '0%'
}
if(est>0){ const p = Math.max(0, Math.min(100, Math.floor(done*100/est))); return p + '%' }
return '评估中'
}
const createTemplate = async ()=>{
const formRef = createFormRef.value
@ -552,7 +566,12 @@ const { createApp, reactive } = Vue;
const j=await r.json();
const jid = j?.data?.id ?? j?.id
state.exportVisible=false
if(jid){ loadJob(jid); loadJobs() } else { msg('任务创建返回异常','error') }
if(jid){
state.jobsTplId = Number(id)
state.jobsVisible = true
loadJobs(1)
startJobsPolling()
} else { msg('任务创建返回异常','error') }
}
Vue.watch(()=>state.exportForm.creatorIds, ()=>{ state.exportForm.resellerId=null; state.exportForm.planId=null; state.exportForm.keyBatchId=null; state.exportForm.codeBatchId=null; state.exportForm.productId=null; loadResellers() })
Vue.watch(()=>state.exportForm.resellerId, ()=>{ state.exportForm.planId=null; state.exportForm.keyBatchId=null; state.exportForm.codeBatchId=null; state.exportForm.productId=null })
@ -612,9 +631,18 @@ const { createApp, reactive } = Vue;
}
}
const download = (id)=>{ window.open(API_BASE + '/api/exports/'+id+'/download','_blank') }
const openSQL = async (id)=>{
try{
const res = await fetch(API_BASE + '/api/exports/'+id+'/sql')
const data = await res.json()
const s = data?.data?.final_sql || data?.final_sql || data?.data?.sql || data?.sql || ''
state.sqlText = s
state.sqlVisible = true
}catch(_e){ state.sqlText=''; state.sqlVisible=false; msg('加载SQL失败','error') }
}
loadTemplates()
return { ...Vue.toRefs(state), visibilityOptions, formatOptions, datasourceOptions, fieldOptions, loadTemplates, createTemplate, openExport, submitExport, loadJob, loadJobs, openJobs, closeJobs, download, openEdit, saveEdit, removeTemplate, resizeDialog, createRules, exportRules, editRules, createFormRef, exportFormRef, editFormRef, dsLabel, exportType, isOrder, exportTitle, creatorOptions, resellerOptions, hasCreators, hasReseller, hasPlan, hasKeyBatch, hasCodeBatch, jobPercent }
return { ...Vue.toRefs(state), visibilityOptions, formatOptions, datasourceOptions, fieldOptions, loadTemplates, createTemplate, openExport, submitExport, loadJob, loadJobs, openJobs, closeJobs, download, openSQL, openEdit, saveEdit, removeTemplate, resizeDialog, createRules, exportRules, editRules, createFormRef, exportFormRef, editFormRef, dsLabel, exportType, isOrder, exportTitle, creatorOptions, resellerOptions, hasCreators, hasReseller, hasPlan, hasKeyBatch, hasCodeBatch, jobPercent, fmtDT }
}
})
app.use(ElementPlus)