|
1 | 1 | package main |
2 | 2 |
|
import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"sync"

	"github.com/gorilla/mux"
)
6 | 10 |
// limit is the page size forced onto proxied cluster-API list requests.
// It is a string because it is written directly into a URL query value.
var limit = "15"
|
12 | | - q.Set("limit", limit) |
| 13 | +type responseCapture struct { |
| 14 | + http.ResponseWriter |
| 15 | + StatusCode int |
| 16 | + Body *bytes.Buffer |
| 17 | +} |
| 18 | + |
| 19 | +func (r *responseCapture) WriteHeader(code int) { |
| 20 | + r.StatusCode = code |
| 21 | + r.ResponseWriter.WriteHeader(code) |
| 22 | +} |
13 | 23 |
|
14 | | - continueToken := q.Get("continueToken") |
15 | | - if continueToken != "" { |
16 | | - q.Set("continue", continueToken) |
| 24 | +func (r *responseCapture) Write(b []byte) (int, error) { |
| 25 | + r.Body.Write(b) |
| 26 | + return r.ResponseWriter.Write(b) |
| 27 | +} |
| 28 | + |
| 29 | +// CreateResponseCapture initializes responseCapture with a http.ResponseWriter and empty bytes.Buffer for the body. |
| 30 | +func CreateResponseCapture(w http.ResponseWriter) *responseCapture { |
| 31 | + return &responseCapture{ |
| 32 | + ResponseWriter: w, |
| 33 | + Body: &bytes.Buffer{}, |
| 34 | + StatusCode: http.StatusOK, |
17 | 35 | } |
| 36 | +} |
// ResourceMetadata is the list-level metadata of a cluster-API list
// response; ContinueToken (JSON "continue") requests the next page.
type ResourceMetadata struct {
	ResourceVersion string `json:"resourceVersion"`
	ContinueToken   string `json:"continue"`
}

// Metadata identifies a single resource within a list response.
type Metadata struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
	Uuid      string `json:"uuid"`
}

// Item is one entry in a list response.
type Item struct {
	Metadata Metadata `json:"metadata"`
}

// ResourceResponse is the subset of a cluster-API list response the
// pagination middleware cares about: the continue token and the items.
type ResourceResponse struct {
	Metadata ResourceMetadata `json:"metadata"`
	Items    []Item           `json:"items"`
}
| 56 | + |
| 57 | +var paginationMap = make(map[int]*ResourceResponse) |
18 | 58 |
|
19 | | - r.URL.RawQuery = q.Encode() |
| 59 | +func handlePagination(c *HeadlampConfig) mux.MiddlewareFunc { |
| 60 | + return func(h http.Handler) http.Handler { |
| 61 | + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
| 62 | + |
| 63 | + rcw := CreateResponseCapture(w) |
| 64 | + |
| 65 | + q := r.URL.Query() |
| 66 | + q.Set("limit", limit) |
| 67 | + r.URL.RawQuery = q.Encode() |
| 68 | + |
| 69 | + h.ServeHTTP(rcw, r) |
| 70 | + |
| 71 | + var currentPageResponse ResourceResponse |
| 72 | + json.Unmarshal(rcw.Body.Bytes(), ¤tPageResponse) |
| 73 | + |
| 74 | + paginationMap[0] = ¤tPageResponse |
| 75 | + |
| 76 | + // Loop to pre-fetch pages 1 through 4 |
| 77 | + currentContinueToken := currentPageResponse.Metadata.ContinueToken |
| 78 | + for i := 1; i <= 4; i++ { |
| 79 | + // Stop if there is no next page |
| 80 | + if currentContinueToken == "" { |
| 81 | + break |
| 82 | + } |
| 83 | + |
| 84 | + // Create a new request for the next page |
| 85 | + query := r.URL.Query() |
| 86 | + query.Set("continue", currentContinueToken) |
| 87 | + r.URL.RawQuery = query.Encode() |
| 88 | + |
| 89 | + // Clear the response capture buffer before the next request |
| 90 | + rcw.Body.Reset() |
| 91 | + |
| 92 | + // Serve the next request |
| 93 | + h.ServeHTTP(rcw, r) |
| 94 | + |
| 95 | + // Unmarshal the new response |
| 96 | + var nextResponse ResourceResponse |
| 97 | + json.Unmarshal(rcw.Body.Bytes(), &nextResponse) |
| 98 | + |
| 99 | + // Store the new response in the map |
| 100 | + paginationMap[i] = &nextResponse |
| 101 | + |
| 102 | + // Update the continue token for the next loop iteration |
| 103 | + currentContinueToken = nextResponse.Metadata.ContinueToken |
| 104 | + } |
| 105 | + }) |
| 106 | + } |
20 | 107 | } |
0 commit comments