Skip to content

Commit bc91184

Browse files
Add mctp bridge endpoint support
Add support for MCTP bridge endpoints that can allocate pools of EIDs for downstream endpoints. We assume each AssignEndpoint d-bus call will be for an MCTP bridge; accordingly, we allocate/reserve a max_pool_size EID range contiguous with the bridge's own EID. This pool size is later updated based on the SET_ENDPOINT_ID command response. - For static EID assignment via the AssignEndpointStatic d-bus call, add a check for whether the EID is part of any other bridge's pool range. [Fixup and requested change from Jeremy Kerr <[email protected]>] Signed-off-by: Faizan Ali <[email protected]> Signed-off-by: Jeremy Kerr <[email protected]>
1 parent ece29f8 commit bc91184

File tree

1 file changed

+147
-22
lines changed

1 file changed

+147
-22
lines changed

src/mctpd.c

Lines changed: 147 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,10 @@ struct peer {
185185
uint8_t endpoint_type;
186186
uint8_t medium_spec;
187187
} recovery;
188+
189+
// Pool size
190+
uint8_t pool_size;
191+
uint8_t pool_start;
188192
};
189193

190194
struct ctx {
@@ -1344,7 +1348,7 @@ static int endpoint_query_phys(struct ctx *ctx, const dest_phys *dest,
13441348
}
13451349

13461350
/* returns -ECONNREFUSED if the endpoint returns failure. */
1347-
static int endpoint_send_set_endpoint_id(const struct peer *peer,
1351+
static int endpoint_send_set_endpoint_id(struct peer *peer,
13481352
mctp_eid_t *new_eidp)
13491353
{
13501354
struct sockaddr_mctp_ext addr;
@@ -1412,9 +1416,16 @@ static int endpoint_send_set_endpoint_id(const struct peer *peer,
14121416

14131417
alloc = resp->status & 0x3;
14141418
if (alloc != 0) {
1415-
// TODO for bridges
1416-
warnx("%s requested allocation pool, unimplemented",
1417-
dest_phys_tostr(dest));
1419+
peer->pool_size = resp->eid_pool_size;
1420+
if (peer->ctx->verbose) {
1421+
fprintf(stderr,
1422+
"%s requested allocation of pool size = %d\n",
1423+
dest_phys_tostr(dest), peer->pool_size);
1424+
}
1425+
if (peer->pool_size > peer->ctx->max_pool_size) {
1426+
warnx("Truncate: requested pool size > max pool size config");
1427+
peer->pool_size = peer->ctx->max_pool_size;
1428+
}
14181429
}
14191430

14201431
rc = 0;
@@ -1642,11 +1653,66 @@ static int peer_set_mtu(struct ctx *ctx, struct peer *peer, uint32_t mtu)
16421653
return rc;
16431654
}
16441655

1656+
struct eid_allocation {
1657+
mctp_eid_t start;
1658+
unsigned int extent; /* 0 = only the start EID */
1659+
};
1660+
1661+
/* Allocate an unused dynamic EID for a peer, optionally with an associated
1662+
* bridge range (of size @bridged_len).
1663+
*
1664+
* We try to find the first allocation that contains the base EID plus the
1665+
* full range. If no space for that exists, we return the largest
1666+
* possible range. If the requested range is 0, then the first available
1667+
* (single) EID will suit as a match, the returned alloc->extent will be zero.
1668+
*
1669+
* It is up to the caller to check whether this range is suitable, and
1670+
* actually reserve that EID (& range) if so.
1671+
*
1672+
* returns 0 on success (with @alloc populated), non-zero on failure.
1673+
*/
1674+
static int allocate_eid(struct ctx *ctx, struct net *net,
1675+
unsigned int bridged_len, struct eid_allocation *alloc)
1676+
{
1677+
struct eid_allocation cur = { 0 }, best = { 0 };
1678+
mctp_eid_t eid;
1679+
1680+
for (eid = ctx->dyn_eid_min; eid <= ctx->dyn_eid_max; eid++) {
1681+
if (net->peers[eid]) {
1682+
// reset our current candidate allocation
1683+
cur.start = 0;
1684+
eid += net->peers[eid]->pool_size;
1685+
continue;
1686+
}
1687+
1688+
// start a new candidate allocation
1689+
if (!cur.start)
1690+
cur.start = eid;
1691+
cur.extent = eid - cur.start;
1692+
1693+
// if this suits, we're done
1694+
if (cur.extent == bridged_len) {
1695+
*alloc = cur;
1696+
return 0;
1697+
}
1698+
1699+
if (cur.extent > best.extent)
1700+
best = cur;
1701+
}
1702+
1703+
if (best.start) {
1704+
*alloc = best;
1705+
return 0;
1706+
}
1707+
1708+
return -1;
1709+
}
1710+
16451711
static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
16461712
const dest_phys *dest, struct peer **ret_peer,
1647-
mctp_eid_t static_eid)
1713+
mctp_eid_t static_eid, bool assign_bridge)
16481714
{
1649-
mctp_eid_t e, new_eid;
1715+
mctp_eid_t new_eid;
16501716
struct net *n = NULL;
16511717
struct peer *peer = NULL;
16521718
uint32_t net;
@@ -1671,22 +1737,43 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
16711737

16721738
new_eid = static_eid;
16731739
} else {
1674-
/* Find an unused dynamic EID */
1675-
for (e = ctx->dyn_eid_min; e <= ctx->dyn_eid_max; e++) {
1676-
if (n->peers[e])
1677-
continue;
1678-
rc = add_peer(ctx, dest, e, net, &peer);
1679-
if (rc < 0)
1680-
return rc;
1681-
break;
1682-
}
1683-
if (e > ctx->dyn_eid_max) {
1684-
warnx("Ran out of EIDs for net %d, allocating %s", net,
1685-
dest_phys_tostr(dest));
1740+
struct eid_allocation alloc;
1741+
unsigned int alloc_size = 0;
1742+
1743+
if (assign_bridge)
1744+
alloc_size = ctx->max_pool_size;
1745+
1746+
rc = allocate_eid(ctx, n, alloc_size, &alloc);
1747+
if (rc) {
1748+
warnx("Cannot allocate any EID (+pool %d) on net %d for %s",
1749+
alloc_size, net, dest_phys_tostr(dest));
16861750
sd_bus_error_setf(berr, SD_BUS_ERROR_FAILED,
16871751
"Ran out of EIDs");
16881752
return -EADDRNOTAVAIL;
16891753
}
1754+
1755+
/* Only allow complete pools for now. In future we could reserve
1756+
* this range, in the assumption that the subsequent pool
1757+
* request (in the Set Endpoint ID response) will fit in this
1758+
* reservation.
1759+
*/
1760+
if (alloc.extent < alloc_size) {
1761+
warnx("Cannot allocate sufficient EIDs (+pool %d) on net %d for %s"
1762+
" (largest span %d at %d)",
1763+
alloc_size, net, dest_phys_tostr(dest),
1764+
alloc.extent, alloc.start);
1765+
alloc.extent = 0;
1766+
}
1767+
1768+
new_eid = alloc.start;
1769+
1770+
rc = add_peer(ctx, dest, new_eid, net, &peer);
1771+
if (rc < 0)
1772+
return rc;
1773+
1774+
peer->pool_size = alloc.extent;
1775+
if (peer->pool_size)
1776+
peer->pool_start = new_eid + 1;
16901777
}
16911778

16921779
rc = endpoint_send_set_endpoint_id(peer, &new_eid);
@@ -1700,6 +1787,10 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
17001787
}
17011788

17021789
if (new_eid != peer->eid) {
1790+
// avoid allocation for any different EID in response
1791+
warnx("Mismatch of requested from received EID, resetting the pool");
1792+
peer->pool_size = 0;
1793+
peer->pool_start = 0;
17031794
rc = change_peer_eid(peer, new_eid);
17041795
if (rc == -EEXIST) {
17051796
sd_bus_error_setf(
@@ -2102,7 +2193,7 @@ static int method_setup_endpoint(sd_bus_message *call, void *data,
21022193
}
21032194

21042195
/* Set Endpoint ID */
2105-
rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0);
2196+
rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0, false);
21062197
if (rc < 0)
21072198
goto err;
21082199

@@ -2155,21 +2246,42 @@ static int method_assign_endpoint(sd_bus_message *call, void *data,
21552246
peer->net, peer_path, 0);
21562247
}
21572248

2158-
rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0);
2249+
rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0, true);
21592250
if (rc < 0)
21602251
goto err;
21612252

21622253
peer_path = path_from_peer(peer);
21632254
if (!peer_path)
21642255
goto err;
21652256

2257+
if (peer->pool_size > 0) {
2258+
//TODO: Implement Allocate EndpointID
2259+
}
2260+
21662261
return sd_bus_reply_method_return(call, "yisb", peer->eid, peer->net,
21672262
peer_path, 1);
21682263
err:
21692264
set_berr(ctx, rc, berr);
21702265
return rc;
21712266
}
21722267

2268+
// Checks if given EID belongs to any bridge's pool range
2269+
static bool is_eid_in_bridge_pool(struct net *n, struct ctx *ctx,
2270+
mctp_eid_t eid)
2271+
{
2272+
for (int i = ctx->dyn_eid_min; i <= eid; i++) {
2273+
struct peer *peer = n->peers[i];
2274+
if (peer && peer->pool_size > 0) {
2275+
if (eid >= peer->pool_start &&
2276+
eid < peer->pool_start + peer->pool_size) {
2277+
return true;
2278+
}
2279+
i += peer->pool_size;
2280+
}
2281+
}
2282+
return false;
2283+
}
2284+
21732285
static int method_assign_endpoint_static(sd_bus_message *call, void *data,
21742286
sd_bus_error *berr)
21752287
{
@@ -2224,10 +2336,22 @@ static int method_assign_endpoint_static(sd_bus_message *call, void *data,
22242336
return sd_bus_error_setf(berr,
22252337
SD_BUS_ERROR_INVALID_ARGS,
22262338
"Address in use");
2339+
} else {
2340+
// is requested EID part of any bridge pool range
2341+
struct net *n = lookup_net(ctx, netid);
2342+
if (!n) {
2343+
bug_warn("%s: Bad old net %d", __func__, netid);
2344+
return -EPROTO;
2345+
}
2346+
if (is_eid_in_bridge_pool(n, ctx, eid)) {
2347+
return sd_bus_error_setf(
2348+
berr, SD_BUS_ERROR_INVALID_ARGS,
2349+
"EID belongs to another MCTP bridge pool");
2350+
}
22272351
}
22282352
}
22292353

2230-
rc = endpoint_assign_eid(ctx, berr, dest, &peer, eid);
2354+
rc = endpoint_assign_eid(ctx, berr, dest, &peer, eid, false);
22312355
if (rc < 0) {
22322356
goto err;
22332357
}
@@ -2637,7 +2761,8 @@ static int peer_endpoint_recover(sd_event_source *s, uint64_t usec,
26372761
* after which we immediately return as there's no old peer state left to
26382762
* maintain.
26392763
*/
2640-
return endpoint_assign_eid(ctx, NULL, &phys, &peer, 0);
2764+
return endpoint_assign_eid(ctx, NULL, &phys, &peer, 0,
2765+
false);
26412766
}
26422767

26432768
/* Confirmation of the same device, apply its already allocated EID */

0 commit comments

Comments
 (0)