diff options
author | Gregory Nutt <gnutt@nuttx.org> | 2014-12-31 13:45:19 -0600 |
---|---|---|
committer | Gregory Nutt <gnutt@nuttx.org> | 2014-12-31 13:45:19 -0600 |
commit | be96ea0162a8291526cc1c036201c27b5db335c7 (patch) | |
tree | f1ffa1ea347293b48aca7fa3f94717297ad1896b /nuttx/drivers/net | |
parent | 2728352bff35883ba799c56091f130793b75db2d (diff) | |
download | px4-nuttx-be96ea0162a8291526cc1c036201c27b5db335c7.tar.gz px4-nuttx-be96ea0162a8291526cc1c036201c27b5db335c7.tar.bz2 px4-nuttx-be96ea0162a8291526cc1c036201c27b5db335c7.zip |
Ethernet skeleton: Add some more example logic
Diffstat (limited to 'nuttx/drivers/net')
-rw-r--r-- | nuttx/drivers/net/skeleton.c | 22 |
1 file changed, 12 insertions, 10 deletions
diff --git a/nuttx/drivers/net/skeleton.c b/nuttx/drivers/net/skeleton.c index 7febcb52b..9e0a19ebf 100644 --- a/nuttx/drivers/net/skeleton.c +++ b/nuttx/drivers/net/skeleton.c @@ -416,7 +416,9 @@ static void skel_interrupt_work(FAR void *arg) skel_interrupt_process(skel); - /* TODO: Re-enable Ethernet interrupts */ + /* Re-enable Ethernet interrupts */ + + up_enable_irq(CONFIG_skeleton_IRQ); } #endif @@ -442,11 +444,13 @@ static int skel_interrupt(int irq, FAR void *context) FAR struct skel_driver_s *skel = &g_skel[0]; #ifdef CONFIG_NET_NOINTS - /* TODO: Disable further Ethernet interrupts. Because Ethernet interrupts - * are also disabled if the TX timeout event occurs, there can be no race + /* Disable further Ethernet interrupts. Because Ethernet interrupts are + * also disabled if the TX timeout event occurs, there can be no race * condition here. */ + up_disable_irq(CONFIG_skeleton_IRQ); + /* TODO: Determine if a TX transfer just completed */ { @@ -558,20 +562,20 @@ static void skel_txtimeout_expiry(int argc, uint32_t arg, ...) FAR struct skel_driver_s *skel = (FAR struct skel_driver_s *)arg; #ifdef CONFIG_NET_NOINTS - /* TODO: Disable further Ethernet interrupts. This will prevent some race + /* Disable further Ethernet interrupts. This will prevent some race * conditions with interrupt work. There is still a potential race * condition with interrupt work that is already queued and in progress. */ + up_disable_irq(CONFIG_skeleton_IRQ); + /* Cancel any pending poll or interrupt work. This will have no effect * on work that has already been started. */ work_cancel(HPWORK, &skel->sk_work); - /* Schedule to perform the TX timeout processing on the worker thread. - * TODO: Assure that no there is not pending interrupt or poll work. - */ + /* Schedule to perform the TX timeout processing on the worker thread. */ work_queue(HPWORK, &skel->sk_work, skel_txtimeout_work, skel, 0); #else @@ -673,9 +677,7 @@ static void skel_poll_expiry(int argc, uint32_t arg, ...) 
if (work_available(&skel->sk_work)) { - /* Schedule to perform the interrupt processing on the worker thread. - * TODO: Make sure that there can be no pending interrupt work. - */ + /* Schedule to perform the interrupt processing on the worker thread. */ work_queue(HPWORK, &skel->sk_work, skel_poll_work, skel, 0); } |