
Commit 77b03f7

mctpd: add bridged endpoint polling mechanism
Implement a periodic endpoint polling mechanism to validate bridged endpoint accessibility. Begin polling as soon as gateway routes are created, and stop once it is established that the endpoint path is accessible. Publish the peer path once the downstream endpoint responds to the poll command.

Signed-off-by: Faizan Ali <faizana@nvidia.com>
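The poll interval itself (ctx->endpoint_poll in the diff below) is not defined in this change; the in-code policy comment only requires an interval of at least 1/2 * Treclaim. A minimal sketch of deriving such an interval is shown here, assuming an illustrative Treclaim of 5 seconds; MCTP_TRECLAIM_US and endpoint_poll_interval_us() are hypothetical names, not part of this commit:

/* Hypothetical helper: pick the bridged-endpoint poll interval from Treclaim.
 * The names and the 5 s value below are assumptions for illustration only. */
#include <stdint.h>

#define MCTP_TRECLAIM_US (5ULL * 1000 * 1000)	/* assumed Treclaim: 5 s */

static inline uint64_t endpoint_poll_interval_us(void)
{
	/* Poll at 1/2 * Treclaim, which satisfies the policy in the diff below. */
	return MCTP_TRECLAIM_US / 2;
}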
1 parent 9696861 commit 77b03f7

1 file changed: +145 -1 lines changed

src/mctpd.c

Lines changed: 145 additions & 1 deletion
@@ -101,6 +101,12 @@ struct role {
 	const char *dbus_val;
 };
 
+// Endpoint poll context for bridged endpoint polling
+struct poll_ctx {
+	struct peer *bridge;
+	mctp_eid_t poll_eid;
+};
+
 static const struct role roles[] = {
 	[ENDPOINT_ROLE_UNKNOWN] = {
 		.role = ENDPOINT_ROLE_UNKNOWN,
@@ -199,6 +205,10 @@ struct peer {
 	// Pool size
 	uint8_t pool_size;
 	uint8_t pool_start;
+
+	struct {
+		sd_event_source **sources;
+	} poll;
 };
 
 struct ctx {
@@ -4756,6 +4766,139 @@ static int endpoint_send_allocate_endpoint_ids(
 	return rc;
 }
 
+static int peer_endpoint_poll(sd_event_source *s, uint64_t usec, void *userdata)
+{
+	struct poll_ctx *pctx = userdata;
+	struct peer *bridge = pctx->bridge;
+	struct net *n;
+	struct peer *peer = NULL;
+	mctp_eid_t ep = pctx->poll_eid;
+	mctp_eid_t ret_eid = 0;
+	int rc = 0, idx = 0;
+
+	/* Polling policy:
+	 *
+	 * Once the bridge's EID pool space is allocated and gateway
+	 * routes for downstream endpoints are in place, the bus owner
+	 * initiates a periodic GET_ENDPOINT_ID command at an interval
+	 * of at least 1/2 * Treclaim.
+	 *
+	 * 1. If the downstream endpoint is present behind the bridge
+	 *    and responds to the poll command, that endpoint path is
+	 *    considered accessible. The endpoint path is published as
+	 *    reachable on D-Bus and polling stops.
+	 *
+	 * 2. If the endpoint is not present or does not respond to the
+	 *    poll command, it has not yet been established whether the
+	 *    endpoint path from the bridge is accessible, so polling
+	 *    continues.
+	 */
+
+	if (!bridge) {
+		warnx("Bridge for eid %d removed?", ep);
+		goto exit;
+	}
+	idx = ep - bridge->pool_start;
+
+	n = lookup_net(bridge->ctx, bridge->net);
+	peer = n->peers[ep];
+	if (!peer) {
+		rc = add_peer(bridge->ctx, &(bridge->phys), ep, bridge->net,
+			      &peer, true);
+		if (rc < 0)
+			goto exit;
+	}
+
+	rc = query_endpoint_poll_commmand(peer, &ret_eid);
+	if (rc < 0) {
+		goto reschedule;
+	}
+
+	if (ret_eid != ep) {
+		warnx("Unexpected eid %d, aborting polling for eid %d",
+		      ret_eid, ep);
+		goto exit;
+	}
+
+	if (bridge->ctx->verbose) {
+		fprintf(stderr, "Endpoint %d is accessible\n", ep);
+	}
+
+	rc = setup_added_peer(peer);
+	if (rc < 0)
+		goto reschedule;
+
+	goto exit;
+
+reschedule:
+	rc = sd_event_source_set_time_relative(bridge->poll.sources[idx],
+					       bridge->ctx->endpoint_poll);
+	if (rc >= 0) {
+		rc = sd_event_source_set_enabled(bridge->poll.sources[idx],
+						 SD_EVENT_ONESHOT);
+	}
+	return 0;
+
+exit:
+	if (bridge) {
+		assert(sd_event_source_get_enabled(bridge->poll.sources[idx],
+						   NULL) == 0);
+		sd_event_source_unref(bridge->poll.sources[idx]);
+		bridge->poll.sources[idx] = NULL;
+	}
+	free(pctx);
+	return rc < 0 ? rc : 0;
+}
+
+static int bridge_poll_start(struct peer *bridge)
+{
+	struct ctx *ctx = bridge->ctx;
+	mctp_eid_t pool_start = bridge->pool_start;
+	uint8_t pool_size = bridge->pool_size;
+	sd_event_source **sources;
+	int rc, i;
+
+	sources = calloc(pool_size, sizeof(sd_event_source *));
+	if (!sources) {
+		warnx("Failed to setup periodic polling for bridge (eid %d)",
+		      bridge->eid);
+		return -ENOMEM;
+	}
+
+	bridge->poll.sources = sources;
+	for (i = 0; i < pool_size; i++) {
+		struct poll_ctx *pctx = calloc(1, sizeof(struct poll_ctx));
+		if (!pctx) {
+			warnx("Failed to allocate memory, skipping polling for eid %d",
+			      pool_start + i);
+			continue;
+		}
+
+		pctx->bridge = bridge;
+		pctx->poll_eid = pool_start + i;
+		rc = sd_event_add_time_relative(
+			ctx->event, &bridge->poll.sources[i], CLOCK_MONOTONIC,
+			ctx->endpoint_poll, 0, peer_endpoint_poll, pctx);
+		if (rc < 0) {
+			warnx("Failed to setup poll event source for eid %d",
+			      pool_start + i);
+			free(pctx);
+			continue;
+		}
+	}
+
+	return 0;
+}
+
 static int endpoint_allocate_eids(struct peer *peer)
 {
 	uint8_t allocated_pool_size = 0;
@@ -4814,7 +4957,8 @@ static int endpoint_allocate_eids(struct peer *peer)
 			peer->pool_size);
 	}
 
-	// TODO: Polling logic for downstream EID
+	// Poll for downstream endpoint accessibility
+	bridge_poll_start(peer);
 
 	return 0;
 }
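A note on the sd-event usage in peer_endpoint_poll() above: time event sources are disabled once they fire, so the handler re-arms itself with sd_event_source_set_time_relative() plus SD_EVENT_ONESHOT until the endpoint answers or polling is abandoned. The standalone sketch below shows that re-arm pattern in isolation; it is illustrative only, with an arbitrary interval and tick limit, and is not code from this commit. It should build with something like: cc rearm.c $(pkg-config --cflags --libs libsystemd).

/* Minimal sketch of the one-shot timer re-arm pattern used above.
 * Interval and tick count are arbitrary example values. */
#include <stdio.h>
#include <time.h>
#include <systemd/sd-event.h>

#define POLL_INTERVAL_USEC (2 * 1000000ULL)

static int ticks;

static int on_poll(sd_event_source *s, uint64_t usec, void *userdata)
{
	sd_event *event = userdata;

	/* In mctpd this is where the downstream endpoint would be queried. */
	printf("poll tick %d\n", ++ticks);
	if (ticks >= 3)
		return sd_event_exit(event, 0); /* endpoint "answered": stop */

	/* Timer sources are disabled after firing; re-arm for another shot. */
	sd_event_source_set_time_relative(s, POLL_INTERVAL_USEC);
	return sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
}

int main(void)
{
	sd_event *event = NULL;
	sd_event_source *src = NULL;

	sd_event_default(&event);
	sd_event_add_time_relative(event, &src, CLOCK_MONOTONIC,
				   POLL_INTERVAL_USEC, 0, on_poll, event);
	sd_event_loop(event);

	sd_event_source_unref(src);
	sd_event_unref(event);
	return 0;
}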
