|
88 | 88 | pxInterface->pfAddMulticastMAC( IGMP_MacAddress.ucBytes ); |
89 | 89 | } |
90 | 90 |
|
91 | | - IGMPReportDesc_t * pxIRD; |
| 91 | + MCastReportData_t * pxIRD; |
92 | 92 | #if ( ipconfigUSE_LLMNR != 0 ) |
93 | | - if( NULL != ( pxIRD = ( IGMPReportDesc_t * ) pvPortMalloc( sizeof( IGMPReportDesc_t ) ) ) ) |
| 93 | + if( NULL != ( pxIRD = ( MCastReportData_t * ) pvPortMalloc( sizeof( MCastReportData_t ) ) ) ) |
94 | 94 | { |
95 | 95 | listSET_LIST_ITEM_OWNER( &( pxIRD->xListItem ), ( void * ) pxIRD ); |
96 | 96 | /* Quick and dirty assignment of end-point. This will probably have to be re-designed and re-done. */ |
97 | | - pxIRD->pxEndPoint = FreeRTOS_FirstEndPoint( FreeRTOS_FirstNetworkInterface() ); |
98 | | - pxIRD->mreq.imr_interface.sin_family = FREERTOS_AF_INET; |
99 | | - pxIRD->mreq.imr_interface.sin_len = sizeof( struct freertos_sockaddr ); |
100 | | - pxIRD->mreq.imr_interface.sin_address.ulIP_IPv4 = FreeRTOS_htonl( 0x00000000U ); |
101 | | - pxIRD->mreq.imr_multiaddr.sin_family = FREERTOS_AF_INET; |
102 | | - pxIRD->mreq.imr_multiaddr.sin_len = sizeof( struct freertos_sockaddr ); |
103 | | - pxIRD->mreq.imr_multiaddr.sin_address.ulIP_IPv4 = ipLLMNR_IP_ADDR; |
| 97 | + /* ToDo: make sure we also join the IPv6 multicast group */ |
| 98 | + pxIRD->pxEndPoint = NULL; |
| 99 | + pxIRD->xMCastGroupAddress.xIs_IPv6 = pdFALSE_UNSIGNED; |
| 100 | + pxIRD->xMCastGroupAddress.xIPAddress.ulIP_IPv4 = ipLLMNR_IP_ADDR; |
104 | 101 | BaseType_t bReportItemConsumed = xAddIGMPReportToList( pxIRD ); |
105 | 102 |
|
106 | 103 | if( pdTRUE != bReportItemConsumed ) |
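
For orientation, the fields this diff reads and writes (xListItem, pxEndPoint, xMCastGroupAddress.xIs_IPv6, xMCastGroupAddress.xIPAddress.ulIP_IPv4, ucCountDown, xNumSockets) suggest a descriptor shaped roughly like the sketch below. This is inferred from usage only, not copied from the PR; in particular, the nested address type is assumed to be the stack's IPv46_Address_t.

```c
/* Reader's sketch of MCastReportData_t, assembled from the fields the diff
 * touches; the actual declaration in the PR may differ. */
typedef struct xMCastReportData
{
    ListItem_t xListItem;               /* Links this report into xIGMP_ScheduleList. */
    NetworkEndPoint_t * pxEndPoint;     /* NULL means "report on every IPv4 end-point". */
    IPv46_Address_t xMCastGroupAddress; /* Group address plus the xIs_IPv6 flag. */
    uint8_t ucCountDown;                /* 0 = idle; N = send after N IGMP events (100 ms each). */
    BaseType_t xNumSockets;             /* How many sockets have joined this group. */
} MCastReportData_t;
```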
|
198 | 195 |
|
199 | 196 | /* and schedule the reports. Note, the IGMP event is set at 100ms |
200 | 197 | * which corresponds to the increment used in ucMaxRespTime. |
201 | | - * pxIRD->ucCountDown holds a count in increments of the IGMP event time, so 12 = 1200ms = 1.2s */ |
| 198 | + * pxMRD->ucCountDown holds a count in increments of the IGMP event time, so 12 = 1200ms = 1.2s */ |
202 | 199 | const ListItem_t * pxIterator; |
203 | 200 | const ListItem_t * xEnd = listGET_END_MARKER( &xIGMP_ScheduleList ); |
204 | | - IGMPReportDesc_t * pxIRD; |
| 201 | + MCastReportData_t * pxMRD; |
205 | 202 |
|
206 | 203 | for( pxIterator = ( const ListItem_t * ) listGET_NEXT( xEnd ); |
207 | 204 | pxIterator != ( const ListItem_t * ) xEnd; |
208 | 205 | pxIterator = ( const ListItem_t * ) listGET_NEXT( pxIterator ) ) |
209 | 206 | { |
210 | | - pxIRD = ( IGMPReportDesc_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
| 207 | + pxMRD = ( MCastReportData_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
211 | 208 |
|
212 | 209 | /* Continue parsing if we are dealing with a general query, or if we are servicing a group-specific |
213 | 210 | * query and this report matches the group-specific query's destination address */ |
214 | | - if( ( uiGroupAddress == 0U ) || ( uiGroupAddress == pxIRD->mreq.imr_multiaddr.sin_address.ulIP_IPv4 ) ) |
| 211 | + if( ( uiGroupAddress == 0U ) || ( uiGroupAddress == pxMRD->xMCastGroupAddress.xIPAddress.ulIP_IPv4 ) ) |
215 | 212 | { |
216 | 213 | /* This report needs to be scheduled for sending. Remember that it may already be scheduled. |
217 | | - * pxIRD->ucCountDown of zero means the report is not scheduled to be sent. If a report is scheduled, and it's |
| 214 | + * pxMRD->ucCountDown of zero means the report is not scheduled to be sent. If a report is scheduled, and its |
218 | 215 | * scheduled time is before ucMaxRespTime, there is nothing to be done. If a |
219 | 216 | * report is scheduled past ucMaxRespTime, or not scheduled at all, we need |
220 | 217 | * to schedule it for a random time between 0 and ucMaxRespTime. */ |
221 | | - if( ( pxIRD->ucCountDown == 0 ) || ( pxIRD->ucCountDown >= ucMaxRespTime ) ) |
| 218 | + if( ( pxMRD->ucCountDown == 0 ) || ( pxMRD->ucCountDown >= ucMaxRespTime ) ) |
222 | 219 | { |
223 | 220 | uint32_t uiRandom; |
224 | 221 |
|
225 | 222 | if( xApplicationGetRandomNumber( &( uiRandom ) ) == pdFALSE ) |
226 | 223 | { |
227 | | - pxIRD->ucCountDown = uiNonRandomCounter++; |
| 224 | + pxMRD->ucCountDown = uiNonRandomCounter++; |
228 | 225 |
|
229 | 226 | if( uiNonRandomCounter > ucMaxRespTime ) |
230 | 227 | { |
|
245 | 242 | uiRandom -= ucMaxRespTime; |
246 | 243 | } |
247 | 244 |
|
248 | | - pxIRD->ucCountDown = ( uint8_t ) uiRandom; |
| 245 | + pxMRD->ucCountDown = ( uint8_t ) uiRandom; |
249 | 246 | } |
250 | 247 | } |
251 | 248 | } |
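
To make the timing comment above concrete: a query carries ucMaxRespTime in 100 ms units, and ucCountDown is decremented once per 100 ms IGMP event, so folding a random number into [1, ucMaxRespTime] spreads the replies of all group members across the response window instead of having every host answer at once. A minimal sketch of that conversion, assuming ucMaxRespTime is non-zero; the helper name is illustrative, not from the PR:

```c
/* Illustrative helper: pick a send slot inside the query's response window.
 * ucMaxRespTime is in 100 ms units, so a result of 12 means the report goes
 * out after 12 IGMP events, i.e. 1.2 s. */
static uint8_t prvRandomCountDown( uint8_t ucMaxRespTime )
{
    uint32_t uiRandom = 0U;

    if( xApplicationGetRandomNumber( &( uiRandom ) ) == pdFALSE )
    {
        return 1U; /* No random number available: send on the next IGMP event. */
    }

    /* Fold into [1, ucMaxRespTime]; assumes ucMaxRespTime != 0. */
    return ( uint8_t ) ( ( uiRandom % ( uint32_t ) ucMaxRespTime ) + 1U );
}
```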
|
261 | 258 | /* Go through the list of IGMP reports and send anything that needs to be sent. */ |
262 | 259 | const ListItem_t * pxIterator; |
263 | 260 | const ListItem_t * xEnd = listGET_END_MARKER( &xIGMP_ScheduleList ); |
264 | | - IGMPReportDesc_t * pxIRD; |
| 261 | + MCastReportData_t * pxMRD; |
| 262 | + NetworkInterface_t * pxInterface; |
| 263 | + NetworkEndPoint_t * pxEndPoint; |
265 | 264 |
|
266 | 265 | for( pxIterator = ( const ListItem_t * ) listGET_NEXT( xEnd ); pxIterator != ( const ListItem_t * ) xEnd; pxIterator = ( const ListItem_t * ) listGET_NEXT( pxIterator ) ) |
267 | 266 | { |
268 | | - pxIRD = ( IGMPReportDesc_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
| 267 | + pxMRD = ( MCastReportData_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
269 | 268 |
|
270 | 269 | /* Only decrement down to one. Decrementing to zero is handled later. */ |
271 | | - if( pxIRD->ucCountDown > 1 ) |
| 270 | + if( pxMRD->ucCountDown > 1 ) |
272 | 271 | { |
273 | | - pxIRD->ucCountDown--; |
| 272 | + pxMRD->ucCountDown--; |
274 | 273 | } |
275 | 274 |
|
276 | | - if( pxIRD->pxEndPoint->bits.bIPv6 ) |
| 275 | + if( pxMRD->xMCastGroupAddress.xIs_IPv6 == pdTRUE_UNSIGNED ) |
277 | 276 | { |
278 | 277 | /* ToDo: handle IPv6 multicast groups through ICMPv6 messages */ |
279 | 278 | continue; |
280 | 279 | } |
281 | 280 |
|
282 | | - /* Hold off on sending reports until our IP is non-zero. This allows a bit of a wait during power up |
283 | | - * when the IP can be zero and also allows us to add reports to the list with a countdown of 1 for fast initial delay. */ |
284 | | - if( ( pxIRD->ucCountDown == 1 ) && ( pxIRD->pxEndPoint->ipv4_settings.ulIPAddress != 0 ) ) |
| 281 | + if( pxMRD->ucCountDown == 1 ) |
285 | 282 | { |
286 | | - pxIRD->ucCountDown = 0; |
287 | | - xSendIGMP( 0, ipIGMP_MEMBERSHIP_REPORT_V2, 0, pxIRD->mreq.imr_multiaddr.sin_address.ulIP_IPv4, pxIRD->pxEndPoint ); |
| 283 | + pxEndPoint = pxMRD->pxEndPoint; |
| 284 | + /* If the end-point is null, the report is for all interfaces. */ |
| 285 | + if( pxEndPoint == NULL ) |
| 286 | + { |
| 287 | + for( pxInterface = FreeRTOS_FirstNetworkInterface(); |
| 288 | + pxInterface != NULL; |
| 289 | + pxInterface = FreeRTOS_NextNetworkInterface( pxInterface ) ) |
| 290 | + { |
| 291 | + for( pxEndPoint = FreeRTOS_FirstEndPoint( pxInterface ); |
| 292 | + pxEndPoint != NULL; |
| 293 | + pxEndPoint = FreeRTOS_NextEndPoint( pxInterface, pxEndPoint ) ) |
| 294 | + { |
| 295 | + if( pxEndPoint->bits.bIPv6 == pdTRUE_UNSIGNED ) |
| 296 | + { |
| 297 | + /* ToDo: handle ICMPv6 reports */ |
| 298 | + continue; |
| 299 | + } |
| 300 | + |
| 301 | + /* Make sure the end-point has an IP address */ |
| 302 | + if( pxEndPoint->ipv4_settings.ulIPAddress != 0 ) |
| 303 | + { |
| 304 | + pxMRD->ucCountDown = 0; |
| 305 | + xSendIGMP( 0, ipIGMP_MEMBERSHIP_REPORT_V2, 0, pxMRD->xMCastGroupAddress.xIPAddress.ulIP_IPv4, pxEndPoint ); |
| 306 | + } |
| 307 | + else |
| 308 | + { /* End-point has no IP address yet; ucCountDown stays at 1 and the report is retried on a later IGMP event. */ |
| 309 | + } |
| 310 | + } |
| 311 | + } |
| 312 | + } |
| 313 | + else |
| 314 | + { |
| 315 | + /* Make sure the end-point has an IP address */ |
| 316 | + if( pxEndPoint->ipv4_settings.ulIPAddress != 0 ) |
| 317 | + { |
| 318 | + pxMRD->ucCountDown = 0; |
| 319 | + xSendIGMP( 0, ipIGMP_MEMBERSHIP_REPORT_V2, 0, pxMRD->xMCastGroupAddress.xIPAddress.ulIP_IPv4, pxEndPoint ); |
| 320 | + } |
| 321 | + } |
288 | 322 | } |
289 | 323 | } |
290 | 324 |
|
|
324 | 358 |
|
325 | 359 | const ListItem_t * pxIterator; |
326 | 360 | const ListItem_t * xEnd = listGET_END_MARKER( &xIGMP_ScheduleList ); |
327 | | - IGMPReportDesc_t * pxIRD; |
| 361 | + MCastReportData_t * pxIRD; |
328 | 362 |
|
329 | 363 | for( pxIterator = ( const ListItem_t * ) listGET_NEXT( xEnd ); |
330 | 364 | pxIterator != ( const ListItem_t * ) xEnd; |
331 | 365 | pxIterator = ( const ListItem_t * ) listGET_NEXT( pxIterator ) ) |
332 | 366 | { |
333 | | - pxIRD = ( IGMPReportDesc_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
334 | | - |
335 | | - if( pxIRD->mreq.imr_multiaddr.sin_address.ulIP_IPv4 == pMCastGroup->imr_multiaddr.sin_address.ulIP_IPv4 ) |
| 367 | + pxIRD = ( MCastReportData_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
| 368 | + |
| 369 | + if( pxIRD->xMCastGroupAddress.xIs_IPv6 == pdTRUE_UNSIGNED ) |
| 370 | + { |
| 371 | + /* ToDo: handle IPv6 */ |
| 372 | + continue; |
| 373 | + } |
| 374 | + |
| 375 | + if( pxIRD->xMCastGroupAddress.xIPAddress.ulIP_IPv4 == pMCastGroup->imr_multiaddr.s_addr ) |
336 | 376 | { |
337 | 377 | /* Found a match. */ |
338 | 378 | if( pxIRD->xNumSockets > 0 ) |
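
The s_addr comparison above reflects the move to a BSD-style struct freertos_ip_mreq. From the application side, joining a group would then look roughly like this sketch; the option name FREERTOS_SO_IP_ADD_MEMBERSHIP is assumed by analogy with BSD's IP_ADD_MEMBERSHIP and may not match the PR's final spelling:

```c
#include <string.h>

#include "FreeRTOS.h"
#include "FreeRTOS_Sockets.h"

/* Hypothetical application-side join; only imr_multiaddr.s_addr is taken
 * from the diff, the socket option name is assumed. */
static void prvJoinGroupSketch( Socket_t xSocket )
{
    struct freertos_ip_mreq xGroup;

    memset( &xGroup, 0, sizeof( xGroup ) );

    /* The group to join, in network byte order. */
    xGroup.imr_multiaddr.s_addr = FreeRTOS_inet_addr( "239.1.2.3" );

    ( void ) FreeRTOS_setsockopt( xSocket,
                                  0,
                                  FREERTOS_SO_IP_ADD_MEMBERSHIP, /* assumed name */
                                  &( xGroup ),
                                  sizeof( xGroup ) );
}
```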
|
356 | 396 | * |
357 | 397 | * @param[in] pNewEntry: The multicast report descriptor to add, unless the group is already listed. |
358 | 398 | */ |
359 | | - BaseType_t xAddIGMPReportToList( IGMPReportDesc_t * pNewEntry ) |
| 399 | + BaseType_t xAddIGMPReportToList( MCastReportData_t * pNewEntry ) |
360 | 400 | { |
361 | 401 | configASSERT( pNewEntry != NULL ); |
362 | 402 |
|
363 | 403 | const ListItem_t * pxIterator; |
364 | 404 | const ListItem_t * xEnd = listGET_END_MARKER( &xIGMP_ScheduleList ); |
365 | | - IGMPReportDesc_t * pxIRD; |
| 405 | + MCastReportData_t * pxIRD; |
366 | 406 |
|
367 | 407 | for( pxIterator = ( const ListItem_t * ) listGET_NEXT( xEnd ); |
368 | 408 | pxIterator != ( const ListItem_t * ) xEnd; |
369 | 409 | pxIterator = ( const ListItem_t * ) listGET_NEXT( pxIterator ) ) |
370 | 410 | { |
371 | | - pxIRD = ( IGMPReportDesc_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
| 411 | + pxIRD = ( MCastReportData_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
372 | 412 |
|
373 | | - if( pxIRD->mreq.imr_multiaddr.sin_address.ulIP_IPv4 == pNewEntry->mreq.imr_multiaddr.sin_address.ulIP_IPv4 ) |
| 413 | + if( pxIRD->xMCastGroupAddress.xIPAddress.ulIP_IPv4 == pNewEntry->xMCastGroupAddress.xIPAddress.ulIP_IPv4 ) |
374 | 414 | { |
375 | 415 | /* Found a duplicate. All IGMP snooping switches already know that we are interested. |
376 | 416 | * Just keep track of how many sockets are interested in this multicast group. */ |
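
This duplicate check pairs with the call site in the first hunk, where the caller frees pxIRD whenever xAddIGMPReportToList() did not consume it. A condensed sketch of that ownership contract, with prvFindReportByGroup() standing in (hypothetically) for the search loop above:

```c
/* Condensed sketch of the add/consume contract; prvFindReportByGroup() is a
 * hypothetical stand-in for the duplicate-search loop shown in the diff. */
BaseType_t xAddReportSketch( MCastReportData_t * pxNew )
{
    MCastReportData_t * pxExisting = prvFindReportByGroup( &( pxNew->xMCastGroupAddress ) );

    if( pxExisting != NULL )
    {
        /* Duplicate: only bump the per-group socket count. Returning pdFALSE
         * tells the caller the entry was not consumed, so the caller frees it. */
        pxExisting->xNumSockets++;
        return pdFALSE;
    }

    /* New group: the schedule list takes ownership. A countdown of 1 sends the
     * first unsolicited report on the next 100 ms IGMP event. */
    pxNew->xNumSockets = 1;
    pxNew->ucCountDown = 1;
    vListInsertEnd( &xIGMP_ScheduleList, &( pxNew->xListItem ) );
    return pdTRUE;
}
```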
|
431 | 471 | { |
432 | 472 | pxMCG = ( MCastGroupDesc_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); |
433 | 473 |
|
434 | | - if( pxMCG->mreq.imr_multiaddr.sin_address.ulIP_IPv4 == pxMulticastGroup->mreq.imr_multiaddr.sin_address.ulIP_IPv4 ) |
| 474 | + if( pxMCG->mreq.imr_multiaddr.s_addr == pxMulticastGroup->mreq.imr_multiaddr.s_addr ) |
435 | 475 | { |
436 | 476 | /* Found a match. If we need to remove this address, go ahead. |
437 | 477 | * If we need to add it, it's already there, so just free the descriptor to prevent memory leaks. */ |
|
462 | 502 | vListInsertEnd( &( pxSocket->u.xUDP.xMulticastGroupsList ), &( pxMulticastGroup->xListItem ) ); |
463 | 503 | /* Inform the network driver */ |
464 | 504 | uint8_t MCastDestMacBytes[ 6 ]; |
465 | | - vSetMultiCastIPv4MacAddress( pxMulticastGroup->mreq.imr_multiaddr.sin_address.ulIP_IPv4, MCastDestMacBytes ); |
| 505 | + vSetMultiCastIPv4MacAddress( pxMulticastGroup->mreq.imr_multiaddr.s_addr, MCastDestMacBytes ); |
466 | 506 |
|
467 | 507 | if( pxNetIf ) |
468 | 508 | { |
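
For reference, vSetMultiCastIPv4MacAddress() implements the standard IANA mapping from an IPv4 group address to its Ethernet MAC: the fixed prefix 01:00:5E followed by the low 23 bits of the address. A sketch of that mapping, assuming the address is passed in network byte order (the helper's real prototype may differ):

```c
/* Standard IPv4 multicast MAC mapping: 01:00:5E + low 23 bits of the group.
 * For example 239.1.2.3 maps to 01:00:5e:01:02:03. */
static void prvMulticastMacSketch( uint32_t ulGroupAddress, uint8_t * pucMacBytes )
{
    uint32_t ulHostOrder = FreeRTOS_ntohl( ulGroupAddress );

    pucMacBytes[ 0 ] = 0x01U;
    pucMacBytes[ 1 ] = 0x00U;
    pucMacBytes[ 2 ] = 0x5EU;
    pucMacBytes[ 3 ] = ( uint8_t ) ( ( ulHostOrder >> 16 ) & 0x7FU ); /* Top 9 bits of the address are lost. */
    pucMacBytes[ 4 ] = ( uint8_t ) ( ( ulHostOrder >> 8 ) & 0xFFU );
    pucMacBytes[ 5 ] = ( uint8_t ) ( ulHostOrder & 0xFFU );
}
```

Because 32 group addresses share each MAC address, a NIC filter programmed this way is only a first-pass filter; the stack still checks the full destination IP when demultiplexing.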
|
493 | 533 | else |
494 | 534 | { |
495 | 535 | /* Adding, but found duplicate. No need to inform the network driver. Simply free |
496 | | - * the IGMPReportDesc_t */ |
| 536 | + * the MCastReportData_t */ |
497 | 537 | if( pxMulticastGroup->pxIGMPReportDesc ) |
498 | 538 | { |
499 | 539 | vPortFree( pxMulticastGroup->pxIGMPReportDesc ); |
|
512 | 552 | /* Removing and found a match. */ |
513 | 553 | /* Inform the network driver */ |
514 | 554 | uint8_t MCastDestMacBytes[ 6 ]; |
515 | | - vSetMultiCastIPv4MacAddress( pxMulticastGroup->mreq.imr_multiaddr.sin_address.ulIP_IPv4, MCastDestMacBytes ); |
| 555 | + vSetMultiCastIPv4MacAddress( pxMulticastGroup->mreq.imr_multiaddr.s_addr, MCastDestMacBytes ); |
516 | 556 |
|
517 | 557 | if( pxNetIf ) |
518 | 558 | { |
|