api: Add support for filtering and pagination to the node list endpoint #12727

Merged: 4 commits, Apr 21, 2022
3 changes: 3 additions & 0 deletions .changelog/12727.txt
@@ -0,0 +1,3 @@
```release-note:improvement
api: Add support for filtering and pagination to the node list endpoint
```
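For context, the sketch below (not part of this diff) shows how a consumer might exercise the new behaviour over plain HTTP. It assumes a locally running agent and Nomad's usual `filter`, `per_page`, and `next_token` query parameters plus the `X-Nomad-NextToken` response header; none of those names are introduced by this change, so treat them as assumptions about the surrounding API conventions.

```go
// Hypothetical standalone example: list nodes with a filter and a page size,
// then print the token that identifies the next page, if any.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("filter", `Status == "ready"`) // same expression syntax as the RPC filter
	params.Set("per_page", "2")               // page size
	// params.Set("next_token", "<token>")    // uncomment to request a later page

	resp, err := http.Get("http://127.0.0.1:4646/v1/nodes?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println("next page token:", resp.Header.Get("X-Nomad-NextToken"))
	fmt.Println(string(body))
}
```

Subsequent pages are requested by echoing the returned token back as `next_token`.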
60 changes: 48 additions & 12 deletions command/node_status.go
@@ -3,6 +3,7 @@ package command
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
@@ -35,6 +36,9 @@ type NodeStatusCommand struct {
self bool
stats bool
json bool
perPage int
pageToken string
filter string
tmpl string
}

@@ -76,6 +80,15 @@ Node Status Options:
-verbose
Display full information.

-per-page
How many results to show per page.

-page-token
Where to start pagination.

-filter
Specifies an expression used to filter query results.

-os
Display operating system name.

@@ -98,15 +111,18 @@ func (c *NodeStatusCommand) Synopsis() string {
func (c *NodeStatusCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-allocs": complete.PredictNothing,
"-json": complete.PredictNothing,
"-self": complete.PredictNothing,
"-short": complete.PredictNothing,
"-stats": complete.PredictNothing,
"-t": complete.PredictAnything,
"-os": complete.PredictAnything,
"-quiet": complete.PredictAnything,
"-verbose": complete.PredictNothing,
"-allocs": complete.PredictNothing,
"-filter": complete.PredictAnything,
"-json": complete.PredictNothing,
"-per-page": complete.PredictAnything,
"-page-token": complete.PredictAnything,
"-self": complete.PredictNothing,
"-short": complete.PredictNothing,
"-stats": complete.PredictNothing,
"-t": complete.PredictAnything,
"-os": complete.PredictAnything,
"-quiet": complete.PredictAnything,
"-verbose": complete.PredictNothing,
})
}

@@ -140,6 +156,9 @@ func (c *NodeStatusCommand) Run(args []string) int {
flags.BoolVar(&c.stats, "stats", false, "")
flags.BoolVar(&c.json, "json", false, "")
flags.StringVar(&c.tmpl, "t", "", "")
flags.StringVar(&c.filter, "filter", "", "")
flags.IntVar(&c.perPage, "per-page", 0, "")
flags.StringVar(&c.pageToken, "page-token", "", "")

if err := flags.Parse(args); err != nil {
return 1
@@ -173,13 +192,22 @@ func (c *NodeStatusCommand) Run(args []string) int {
return 1
}

var q *api.QueryOptions
// Set up the options to capture any filter passed and pagination
// details.
opts := api.QueryOptions{
Filter: c.filter,
PerPage: int32(c.perPage),
NextToken: c.pageToken,
}

// If the user requested showing the node OS, include this within the
// query params.
if c.os {
q = &api.QueryOptions{Params: map[string]string{"os": "true"}}
opts.Params = map[string]string{"os": "true"}
}

// Query the node info
nodes, _, err := client.Nodes().List(q)
nodes, qm, err := client.Nodes().List(&opts)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node status: %s", err))
return 1
@@ -267,6 +295,14 @@ func (c *NodeStatusCommand) Run(args []string) int {

// Dump the output
c.Ui.Output(formatList(out))

if qm.NextToken != "" {
c.Ui.Output(fmt.Sprintf(`
Results have been paginated. To get the next page run:

%s -page-token %s`, argsWithoutPageToken(os.Args), qm.NextToken))
}

return 0
}

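The new `-filter`, `-per-page`, and `-page-token` flags map one-to-one onto the `Filter`, `PerPage`, and `NextToken` fields of `api.QueryOptions` shown above, and `QueryMeta.NextToken` is what the command echoes back in its pagination hint. A minimal sketch, assuming a reachable local agent, of driving the same pagination loop directly through the Go api package; the filter expression and page size are illustrative.

```go
// Illustrative only: walk every page of the node list, feeding each
// QueryMeta.NextToken back into the next request.
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	opts := &api.QueryOptions{
		Filter:  `Datacenter == "dc1"`, // example filter expression
		PerPage: 2,                     // small page size to force pagination
	}

	for {
		nodes, qm, err := client.Nodes().List(opts)
		if err != nil {
			panic(err)
		}
		for _, n := range nodes {
			fmt.Println(n.ID, n.Status)
		}
		if qm.NextToken == "" {
			break // no more pages
		}
		opts.NextToken = qm.NextToken
	}
}
```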
40 changes: 31 additions & 9 deletions nomad/node_endpoint.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"net/http"
"reflect"
"strings"
"sync"
@@ -16,6 +17,7 @@ import (
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/state/paginator"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/raft"
vapi "github.com/hashicorp/vault/api"
@@ -1377,12 +1379,12 @@ func (n *Node) List(args *structs.NodeListRequest,
return structs.ErrPermissionDenied
}

// Setup the blocking query
// Set up the blocking query.
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Capture all the nodes

var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
@@ -1394,16 +1396,36 @@
return err
}

// Generate the tokenizer to use for pagination. The ID of a node is
// unique within the region, therefore WithID is the only tokenizer
// option we need.
tokenizer := paginator.NewStructsTokenizer(iter, paginator.StructsTokenizerOptions{WithID: true})

var nodes []*structs.NodeListStub
for {
raw := iter.Next()
if raw == nil {
break
}
node := raw.(*structs.Node)
nodes = append(nodes, node.Stub(args.Fields))

// Build the paginator. This includes the function that is
// responsible for appending a node to the nodes array.
paginatorImpl, err := paginator.NewPaginator(iter, tokenizer, nil, args.QueryOptions,
func(raw interface{}) error {
nodes = append(nodes, raw.(*structs.Node).Stub(args.Fields))
return nil
})
if err != nil {
return structs.NewErrRPCCodedf(
http.StatusBadRequest, "failed to create result paginator: %v", err)
}

// Calling Page populates the nodes slice and returns the token for
// the next page, if any.
nextToken, err := paginatorImpl.Page()
if err != nil {
return structs.NewErrRPCCodedf(
http.StatusBadRequest, "failed to read result page: %v", err)
}

// Populate the reply.
reply.Nodes = nodes
reply.NextToken = nextToken

// Use the last index that affected the nodes table
index, err := state.Index("nodes")
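The paginator consumes the memdb iterator, skips entries up to the supplied next token, applies any filter, and stops appending once the requested page size is reached, handing back the ID of the first element that did not fit as the new next token. Below is a self-contained sketch of that cursor mechanism over a plain sorted slice of IDs; the function is illustrative and stands in for Nomad's paginator, tokenizer, and filter plumbing rather than reproducing them.

```go
// Illustrative cursor pagination over a sorted slice of IDs.
package main

import "fmt"

// page returns at most perPage items starting at nextToken, plus the token
// that identifies where the following page should begin ("" when exhausted).
func page(sortedIDs []string, nextToken string, perPage int) ([]string, string) {
	var out []string
	for _, id := range sortedIDs {
		// Skip everything before the requested starting point.
		if nextToken != "" && id < nextToken {
			continue
		}
		// Once the page is full, the current ID becomes the next token.
		if perPage > 0 && len(out) == perPage {
			return out, id
		}
		out = append(out, id)
	}
	return out, ""
}

func main() {
	ids := []string{"a1", "a2", "a3", "a4", "a5"}

	first, token := page(ids, "", 2)
	fmt.Println(first, token) // [a1 a2] a3

	second, token := page(ids, token, 2)
	fmt.Println(second, token) // [a3 a4] a5
}
```

With a page size of 2 this reproduces the shape of the test cases below: the first two IDs come back, and the third ID is returned as the token for the next request.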
154 changes: 154 additions & 0 deletions nomad/node_endpoint_test.go
@@ -3940,3 +3940,157 @@ func TestClientEndpoint_UpdateAlloc_Evals_ByTrigger(t *testing.T) {
}

}

func TestNode_List_PaginationFiltering(t *testing.T) {
ci.Parallel(t)

s1, _, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)

// Build a set of nodes in various datacenters and states. This allows us
// to test different filter queries along with pagination.
mocks := []struct {
id string
dc string
status string
}{
{
id: "aaaa1111-3350-4b4b-d185-0e1992ed43e9",
dc: "dc2",
status: structs.NodeStatusDisconnected,
},
{
id: "aaaaaa22-3350-4b4b-d185-0e1992ed43e9",
dc: "dc1",
status: structs.NodeStatusReady,
},
{
id: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
dc: "dc3",
status: structs.NodeStatusReady,
},
{
id: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
dc: "dc2",
status: structs.NodeStatusDown,
},
{
id: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
dc: "dc3",
status: structs.NodeStatusDown,
},
{
id: "aaaaaacc-3350-4b4b-d185-0e1992ed43e9",
dc: "dc1",
status: structs.NodeStatusReady,
},
}

testState := s1.fsm.State()

for i, m := range mocks {
index := 1000 + uint64(i)
mockNode := mock.Node()
mockNode.ID = m.id
mockNode.Datacenter = m.dc
mockNode.Status = m.status
mockNode.CreateIndex = index
require.NoError(t, testState.UpsertNode(structs.MsgTypeTestSetup, index, mockNode))
}

// The server is running with ACLs enabled, so generate an adequate token
// to use.
aclToken := mock.CreatePolicyAndToken(t, testState, 1100, "test-valid-read",
mock.NodePolicy(acl.PolicyRead)).SecretID

cases := []struct {
name string
filter string
nextToken string
pageSize int32
expectedNextToken string
expectedIDs []string
expectedError string
}{
{
name: "pagination no filter",
pageSize: 2,
expectedNextToken: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
expectedIDs: []string{
"aaaa1111-3350-4b4b-d185-0e1992ed43e9",
"aaaaaa22-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "pagination no filter with next token",
pageSize: 2,
nextToken: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
expectedNextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
expectedIDs: []string{
"aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "pagination no filter with next token end of pages",
pageSize: 2,
nextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
expectedNextToken: "",
expectedIDs: []string{
"aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
"aaaaaacc-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "filter no pagination",
filter: `Datacenter == "dc3"`,
expectedIDs: []string{
"aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
"aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "filter and pagination",
filter: `Status != "ready"`,
pageSize: 2,
expectedNextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
expectedIDs: []string{
"aaaa1111-3350-4b4b-d185-0e1992ed43e9",
"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
},
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
req := &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Filter: tc.filter,
PerPage: tc.pageSize,
NextToken: tc.nextToken,
},
}
req.AuthToken = aclToken
var resp structs.NodeListResponse
err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
if tc.expectedError == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedError)
return
}

actualIDs := []string{}

for _, node := range resp.Nodes {
actualIDs = append(actualIDs, node.ID)
}
require.Equal(t, tc.expectedIDs, actualIDs, "unexpected page of nodes")
require.Equal(t, tc.expectedNextToken, resp.QueryMeta.NextToken, "unexpected NextToken")
})
}
}
9 changes: 9 additions & 0 deletions nomad/structs/structs.go
@@ -1962,6 +1962,15 @@ type Node struct {
ModifyIndex uint64
}

// GetID is a helper for getting the ID when the object may be nil and is
// required for pagination.
func (n *Node) GetID() string {
if n == nil {
return ""
}
return n.ID
}

// Sanitize returns a copy of the Node omitting confidential fields
// It only returns a copy if the Node contains the confidential fields
func (n *Node) Sanitize() *Node {
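GetID exists so that the pagination tokenizer can ask any element for its ID without a nil check at the call site; it relies on the Go rule that a method with a pointer receiver may be called on a nil receiver as long as the method never dereferences it. A small standalone illustration of the pattern, using a hypothetical Thing type rather than Nomad's Node:

```go
package main

import "fmt"

type Thing struct{ ID string }

// GetID is safe to call on a nil *Thing: methods on pointer receivers may run
// with a nil receiver as long as they never dereference it.
func (t *Thing) GetID() string {
	if t == nil {
		return ""
	}
	return t.ID
}

func main() {
	var missing *Thing
	fmt.Println(missing.GetID() == "") // true: no panic on the nil receiver
	fmt.Println((&Thing{ID: "abc"}).GetID())
}
```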
26 changes: 26 additions & 0 deletions nomad/structs/structs_test.go
@@ -6612,6 +6612,32 @@ func TestNode_Copy(t *testing.T) {
require.Equal(node.Drivers, node2.Drivers)
}

func TestNode_GetID(t *testing.T) {
ci.Parallel(t)

testCases := []struct {
inputNode *Node
expectedOutput string
name string
}{
{
inputNode: nil,
expectedOutput: "",
name: "nil input node",
},
{
inputNode: &Node{ID: "someid"},
expectedOutput: "someid",
name: "nil input node",
},
}

for _, tc := range testCases {
	t.Run(tc.name, func(t *testing.T) {
		actualOutput := tc.inputNode.GetID()
		require.Equal(t, tc.expectedOutput, actualOutput)
	})
}
}

func TestNode_Sanitize(t *testing.T) {
ci.Parallel(t)
