Skip to content

Commit 04947a1

Browse files
mctpd: add bridged endpoint polling mechanism
Implement a periodic endpoint polling mechanism to validate bridged endpoint accessibility. Begin polling as soon as gateway routes are created. Stop polling once it is established that the endpoint path is accessible. Publish the peer path once the downstream endpoint responds to the poll command. Signed-off-by: Faizan Ali <faizana@nvidia.com>
1 parent 367fc81 commit 04947a1

File tree

1 file changed

+144
-1
lines changed

1 file changed

+144
-1
lines changed

src/mctpd.c

Lines changed: 144 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,12 @@ struct role {
101101
const char *dbus_val;
102102
};
103103

104+
// Endpoint poll context for bridged endpoint polling.
// One instance is heap-allocated per downstream EID and handed to the
// poll timer callback as userdata; the callback frees it when polling
// for that EID stops.
struct poll_ctx {
	// Bridge peer whose downstream EID pool contains poll_eid
	struct peer *bridge;
	// Downstream EID being polled for accessibility
	mctp_eid_t poll_eid;
};
109+
104110
static const struct role roles[] = {
105111
[ENDPOINT_ROLE_UNKNOWN] = {
106112
.role = ENDPOINT_ROLE_UNKNOWN,
@@ -199,6 +205,10 @@ struct peer {
199205
// Pool size
200206
uint8_t pool_size;
201207
uint8_t pool_start;
208+
209+
struct {
210+
sd_event_source **sources;
211+
} poll;
202212
};
203213

204214
struct msg_type_support {
@@ -5011,6 +5021,136 @@ static int endpoint_send_allocate_endpoint_ids(
50115021
return rc;
50125022
}
50135023

5024+
/* Timer callback: poll one bridged (downstream) endpoint for
 * accessibility.
 *
 * @s:        the one-shot event source that fired (unused; looked up
 *            again via bridge->poll.sources so it can be released)
 * @usec:     dispatch time (unused)
 * @userdata: struct poll_ctx, owned by this callback; freed when
 *            polling for the EID stops
 *
 * Returns 0 when polling continues or completes cleanly, negative
 * errno on failure.
 */
static int peer_endpoint_poll(sd_event_source *s, uint64_t usec, void *userdata)
{
	struct poll_ctx *pctx = userdata;
	struct peer *bridge = pctx->bridge;
	mctp_eid_t ep = pctx->poll_eid;
	mctp_eid_t pool_start, idx;
	struct peer *peer = NULL;
	mctp_eid_t ret_eid = 0;
	struct net *n;
	int rc = 0;

	if (!bridge) {
		free(pctx);
		return 0;
	}

	pool_start = bridge->pool_start;
	// index of this EID's event source within bridge->poll.sources
	idx = ep - pool_start;

	/* Polling policy:
	 *
	 * Once the bridge EID pool is allocated and gateway routes for
	 * the downstream endpoints are in place, the bus owner initiates
	 * a periodic GET_ENDPOINT_ID command at an interval of at least
	 * 1/2 * TRECLAIM.
	 *
	 * 1. If a downstream endpoint is present behind the bridge and
	 *    responds to the poll command, the endpoint path is
	 *    considered accessible. The endpoint is published as
	 *    reachable on D-Bus and polling stops.
	 *
	 * 2. If the endpoint is absent or does not respond to the poll
	 *    command, it has not yet been established whether the
	 *    endpoint path from the bridge is accessible, so polling
	 *    continues.
	 */

	n = lookup_net(bridge->ctx, bridge->net);
	peer = n->peers[ep];
	if (!peer) {
		rc = add_peer(bridge->ctx, &(bridge->phys), ep, bridge->net,
			      &peer, true);
		if (rc < 0)
			goto exit;
	}

	rc = query_endpoint_poll_commmand(peer, &ret_eid);
	if (rc < 0) {
		// no response yet; keep polling
		goto reschedule;
	}

	if (ret_eid != ep) {
		warnx("Unexpected eid %d abort polling for eid %d", ret_eid,
		      ep);
		goto exit;
	}

	if (bridge->ctx->verbose) {
		fprintf(stderr, "Endpoint %d is accessible\n", ep);
	}

	rc = setup_added_peer(peer);
	if (rc < 0)
		goto reschedule;

exit:
	// Polling for this EID is over (success or abort): release the
	// one-shot source and its context.
	if (bridge) {
		assert(sd_event_source_get_enabled(bridge->poll.sources[idx],
						   NULL) == 0);
		sd_event_source_unref(bridge->poll.sources[idx]);
		bridge->poll.sources[idx] = NULL;
	}
	free(pctx);
	return rc < 0 ? rc : 0;

reschedule:
	// Re-arm the one-shot timer for another poll attempt.
	rc = mctp_ops.sd_event.source_set_time_relative(
		bridge->poll.sources[idx], bridge->ctx->endpoint_poll);
	if (rc >= 0) {
		rc = sd_event_source_set_enabled(bridge->poll.sources[idx],
						 SD_EVENT_ONESHOT);
	}
	if (rc < 0) {
		// Could not re-arm the timer: fall through to cleanup so
		// the event source and pctx do not leak.
		warnx("Failed to reschedule poll for eid %d", ep);
		goto exit;
	}
	return 0;
}
5109+
5110+
static int bridge_poll_start(struct peer *bridge)
5111+
{
5112+
mctp_eid_t pool_start = bridge->pool_start;
5113+
mctp_eid_t pool_size = bridge->pool_size;
5114+
sd_event_source **sources = NULL;
5115+
struct ctx *ctx;
5116+
int rc;
5117+
int i;
5118+
5119+
sources = calloc(pool_size, sizeof(sd_event_source *));
5120+
ctx = bridge->ctx;
5121+
5122+
if (!sources) {
5123+
rc = -ENOMEM;
5124+
warnx("Failed to setup periodic polling for bridge (eid %d)",
5125+
bridge->eid);
5126+
return rc;
5127+
}
5128+
5129+
bridge->poll.sources = sources;
5130+
for (i = 0; i < pool_size; i++) {
5131+
struct poll_ctx *pctx = calloc(1, sizeof(struct poll_ctx));
5132+
if (!pctx) {
5133+
warnx("Failed to memory, skip polling for eid %d",
5134+
pool_start + i);
5135+
continue;
5136+
}
5137+
5138+
pctx->bridge = bridge;
5139+
pctx->poll_eid = pool_start + i;
5140+
rc = mctp_ops.sd_event.add_time_relative(
5141+
ctx->event, &bridge->poll.sources[i], CLOCK_MONOTONIC,
5142+
ctx->endpoint_poll, 0, peer_endpoint_poll, pctx);
5143+
if (rc < 0) {
5144+
warnx("Failed to setup poll event source for eid %d",
5145+
(pool_start + i));
5146+
free(pctx);
5147+
continue;
5148+
}
5149+
}
5150+
5151+
return 0;
5152+
}
5153+
50145154
static int endpoint_allocate_eids(struct peer *peer)
50155155
{
50165156
uint8_t allocated_pool_size = 0;
@@ -5079,7 +5219,10 @@ static int endpoint_allocate_eids(struct peer *peer)
50795219
peer->pool_size);
50805220
}
50815221

5082-
// TODO: Polling logic for downstream EID
5222+
// Poll for downstream endpoint accessibility
5223+
if (peer->ctx->endpoint_poll) {
5224+
bridge_poll_start(peer);
5225+
}
50835226

50845227
return 0;
50855228
}

0 commit comments

Comments
 (0)