# Changes epoll/kqueue to operate by file descriptors, not
# event pointers, to hopefully fix the recent poll crashes.

Index: libtorrent/src/torrent/poll_epoll.cc
===================================================================
--- libtorrent/src/torrent/poll_epoll.cc (revision 1072)
+++ libtorrent/src/torrent/poll_epoll.cc (working copy)
@@ -37,6 +37,7 @@
 #include "config.h"
 
 #include
+#include
 #include
 #include
 
@@ -60,7 +61,7 @@
 
 inline void
 PollEPoll::set_event_mask(Event* e, uint32_t m) {
-  m_table[e->file_descriptor()] = std::make_pair(m, e);
+  m_table[e->file_descriptor()] = Table::value_type(m, e);
 }
 
 inline void
@@ -70,7 +71,7 @@
 
   epoll_event e;
   e.data.u64 = 0; // Make valgrind happy? Remove please.
-  e.data.ptr = event;
+  e.data.fd = event->file_descriptor();
   e.events = mask;
 
   set_event_mask(event, mask);
@@ -81,16 +82,20 @@
       return;
 
     // Handle some libcurl/c-ares bugs by retrying once.
+    int retry = op;
     if (op == EPOLL_CTL_ADD && errno == EEXIST) {
-      op = EPOLL_CTL_MOD;
+      retry = EPOLL_CTL_MOD;
       errno = 0;
 
     } else if (op == EPOLL_CTL_MOD && errno == ENOENT) {
-      op = EPOLL_CTL_ADD;
+      retry = EPOLL_CTL_ADD;
      errno = 0;
     }
 
-    if (errno || epoll_ctl(m_fd, op, event->file_descriptor(), &e))
-      throw internal_error("PollEPoll::modify(...) epoll_ctl call failed");
+    if (errno || epoll_ctl(m_fd, retry, event->file_descriptor(), &e)) {
+      char errmsg[1024];
+      snprintf(errmsg, sizeof(errmsg), "PollEPoll::modify(...) epoll_ctl(%d, %d -> %d, %d, [%p:%x]) = %d: %s", m_fd, op, retry, event->file_descriptor(), event, mask, errno, strerror(errno));
+      throw internal_error(errmsg);
+    }
   }
 }
 
@@ -138,20 +143,25 @@
 
 void
 PollEPoll::perform() {
   for (epoll_event *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr) {
+    if (itr->data.fd < 0 || (size_t)itr->data.fd >= m_table.size())
+      continue;
+
+    Table::iterator evItr = m_table.begin() + itr->data.fd;
+
     // Each branch must check for data.ptr != NULL to allow the socket
     // to remove itself between the calls.
     //
     // TODO: Make it so that it checks that read/write is wanted, that
     // it wasn't removed from one of them but not closed.
-    if (itr->events & EPOLLERR && itr->data.ptr != NULL && event_mask((Event*)itr->data.ptr) & EPOLLERR)
-      ((Event*)itr->data.ptr)->event_error();
+    if (itr->events & EPOLLERR && evItr->second != NULL && evItr->first & EPOLLERR)
+      evItr->second->event_error();
 
-    if (itr->events & EPOLLIN && itr->data.ptr != NULL && event_mask((Event*)itr->data.ptr) & EPOLLIN)
-      ((Event*)itr->data.ptr)->event_read();
+    if (itr->events & EPOLLIN && evItr->second != NULL && evItr->first & EPOLLIN)
+      evItr->second->event_read();
 
-    if (itr->events & EPOLLOUT && itr->data.ptr != NULL && event_mask((Event*)itr->data.ptr) & EPOLLOUT)
-      ((Event*)itr->data.ptr)->event_write();
+    if (itr->events & EPOLLOUT && evItr->second != NULL && evItr->first & EPOLLOUT)
+      evItr->second->event_write();
   }
 
   m_waitingEvents = 0;
@@ -173,9 +183,14 @@
   if (event_mask(event) != 0)
     throw internal_error("PollEPoll::close(...) called but the file descriptor is active");
 
+  m_table[event->file_descriptor()] = Table::value_type();
+
+  /*
+  Shouldn't be needed anymore.
   for (epoll_event *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr)
     if (itr->data.ptr == event)
       itr->data.ptr = NULL;
+  */
 }
 
 void
@@ -183,12 +198,14 @@
   // Kernel removes closed FDs automatically, so just clear the mask and remove it from pending calls.
   // Don't touch if the FD was re-used before we received the close notification.
   if (m_table[event->file_descriptor()].second == event)
-    set_event_mask(event, 0);
+    m_table[event->file_descriptor()] = Table::value_type();
 
+  /*
   for (epoll_event *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr) {
     if (itr->data.ptr == event)
      itr->data.ptr = NULL;
   }
+  */
 }
 
 // Use custom defines for EPOLL* to make the below code compile with
Index: libtorrent/src/torrent/poll_kqueue.cc
===================================================================
--- libtorrent/src/torrent/poll_kqueue.cc (revision 1072)
+++ libtorrent/src/torrent/poll_kqueue.cc (working copy)
@@ -70,7 +70,7 @@
 PollKQueue::set_event_mask(Event* e, uint32_t m) {
   assert(e->file_descriptor() != -1);
 
-  m_table[e->file_descriptor()] = std::make_pair(m, e);
+  m_table[e->file_descriptor()] = Table::value_type(m, e);
 }
 
 void
@@ -87,7 +87,7 @@
 
 void
 PollKQueue::modify(Event* event, unsigned short op, short mask) {
-  // Flush the changed filters to the kernel if the buffer if full.
+  // Flush the changed filters to the kernel if the buffer is full.
   if (m_changedEvents == m_table.size())
     flush_events();
 
@@ -100,7 +100,8 @@
 
   struct kevent* itr = m_changes + (m_changedEvents++);
 
-  EV_SET(itr, event->file_descriptor(), mask, op, 0, 0, event);
+  assert(event == m_table[event->file_descriptor()].second);
+  EV_SET(itr, event->file_descriptor(), mask, op, 0, 0, NULL);
 }
 
 PollKQueue*
@@ -196,8 +197,9 @@
     return nfds;
 
   if (FD_ISSET(0, readSet)) {
+    m_events[m_waitingEvents].ident = 0;
    m_events[m_waitingEvents].filter = EVFILT_READ;
-    m_events[m_waitingEvents].udata = m_stdinEvent;
+    m_events[m_waitingEvents].flags = 0;
     m_waitingEvents++;
   }
 
@@ -208,19 +210,24 @@
 
 void
 PollKQueue::perform() {
   for (struct kevent *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr) {
-    if ((itr->flags & EV_ERROR) && itr->udata != NULL) {
-      if (event_mask((Event*)itr->udata) & flag_error)
-        ((Event*)itr->udata)->event_error();
+    if (itr->ident < 0 || itr->ident >= m_table.size())
       continue;
+
+    Table::iterator evItr = m_table.begin() + itr->ident;
+
+    if ((itr->flags & EV_ERROR) && evItr->second != NULL) {
+      if (evItr->first & flag_error)
+        evItr->second->event_error();
+      continue;
     }
 
     // Also check current mask.
-    if (itr->filter == EVFILT_READ && itr->udata != NULL && event_mask((Event*)itr->udata) & flag_read)
-      ((Event*)itr->udata)->event_read();
+    if (itr->filter == EVFILT_READ && evItr->second != NULL && evItr->first & flag_read)
+      evItr->second->event_read();
 
-    if (itr->filter == EVFILT_WRITE && itr->udata != NULL && event_mask((Event*)itr->udata) & flag_write)
-      ((Event*)itr->udata)->event_write();
+    if (itr->filter == EVFILT_WRITE && evItr->second != NULL && evItr->first & flag_write)
+      evItr->second->event_write();
   }
 
   m_waitingEvents = 0;
@@ -249,11 +256,16 @@
   if (event_mask(event) != 0)
     throw internal_error("PollKQueue::close(...) called but the file descriptor is active");
 
+  m_table[event->file_descriptor()] = Table::value_type();
+
+  /*
+  Shouldn't be needed anymore.
   for (struct kevent *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr)
     if (itr->udata == event)
      itr->udata = NULL;
 
   m_changedEvents = std::remove_if(m_changes, m_changes + m_changedEvents, rak::equal(event, rak::mem_ref(&kevent::udata))) - m_changes;
+  */
 }
 
 void
@@ -269,14 +281,16 @@
   // Kernel removes closed FDs automatically, so just clear the mask
   // and remove it from pending calls. Don't touch if the FD was
   // re-used before we received the close notification.
   if (m_table[event->file_descriptor()].second == event)
-    set_event_mask(event, 0);
+    m_table[event->file_descriptor()] = Table::value_type();
 
+  /*
   for (struct kevent *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr) {
     if (itr->udata == event)
       itr->udata = NULL;
   }
   m_changedEvents = std::remove_if(m_changes, m_changes + m_changedEvents, rak::equal(event, rak::mem_ref(&kevent::udata))) - m_changes;
+  */
 }
 
 // Use custom defines for EPOLL* to make the below code compile with
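
For reference, here is a minimal standalone sketch of the fd-indexed lookup pattern both pollers switch to above: the kernel event carries only a file descriptor, which is bounds-checked and resolved through a table the poller owns, so a stale Event pointer can never be dereferenced after a socket is closed or its descriptor re-used. The class and member names below (FdPoller, watch_read, closed, perform) are illustrative only and are not libtorrent's actual interface; the kqueue variant is the same idea with the descriptor read from kevent::ident instead of udata.

// Illustrative sketch only -- not libtorrent code.
#include <sys/epoll.h>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct Event {
  virtual void event_read() = 0;
  virtual ~Event() {}
};

class FdPoller {
public:
  FdPoller(int epfd, std::size_t max_fds) : m_fd(epfd), m_table(max_fds) {}

  void watch_read(Event* e, int fd) {
    epoll_event ev = {};
    ev.events = EPOLLIN;
    ev.data.fd = fd;  // store the descriptor, never the Event pointer
    m_table[fd] = std::make_pair(uint32_t(EPOLLIN), e);
    epoll_ctl(m_fd, EPOLL_CTL_ADD, fd, &ev);
  }

  void closed(int fd) {
    // Clearing the table entry is enough: queued epoll_events only carry
    // the fd, which now resolves to a NULL Event and is skipped below.
    m_table[fd] = std::make_pair(uint32_t(0), static_cast<Event*>(NULL));
  }

  void perform(const epoll_event* events, int n) {
    for (int i = 0; i < n; ++i) {
      int fd = events[i].data.fd;
      if (fd < 0 || static_cast<std::size_t>(fd) >= m_table.size())
        continue;  // out-of-range or bogus descriptor
      if (m_table[fd].second == NULL)
        continue;  // closed or re-used before we got here
      if ((events[i].events & EPOLLIN) && (m_table[fd].first & EPOLLIN))
        m_table[fd].second->event_read();
    }
  }

private:
  int m_fd;
  std::vector<std::pair<uint32_t, Event*> > m_table;
};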