select.c
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
 * Date           Author       Notes
 * 2016-12-28     Bernard      first version
*/
#include <rtthread.h>
#include <poll.h>
#include <sys/select.h>

static void fdszero(fd_set *set, int nfds)
{
    fd_mask *m;
    int n;

    /*
     * The 'sizeof(fd_set)' in system space may differ from that in user
     * space, so the actual size of the 'fd_set' is determined here from
     * the 'nfds' parameter instead.
     */
    m = (fd_mask *)set;
    for (n = 0; n < nfds; n += (sizeof(fd_mask) * 8))
    {
        rt_memset(m, 0, sizeof(fd_mask));
        m ++;
    }
}
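
/*
 * Illustrative example (not from the original source): assuming a 32-bit
 * fd_mask, a call with nfds == 40 runs the loop above for n == 0 and
 * n == 32, clearing two fd_mask words and thus covering descriptors 0..39.
 */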

int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout)
{
    int fd;
    int npfds;
    int msec;
    int ndx;
    int ret;
    struct pollfd *pollset = RT_NULL;

    /* How many pollfd structures do we need to allocate? */
    for (fd = 0, npfds = 0; fd < nfds; fd++)
    {
        /* Check if any monitor operation is requested on this fd */
        if ((readfds   && FD_ISSET(fd, readfds))  ||
            (writefds  && FD_ISSET(fd, writefds)) ||
            (exceptfds && FD_ISSET(fd, exceptfds)))
        {
            npfds++;
        }
    }
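
    /*
     * Note: a descriptor that appears in more than one of the three sets
     * still contributes exactly one pollfd entry; the initialization loop
     * below merges its requested events into that single entry.
     */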
    /* Allocate the descriptor list for poll() */
    if (npfds > 0)
    {
        pollset = (struct pollfd *)rt_calloc(npfds, sizeof(struct pollfd));
        if (!pollset)
        {
            return -1;
        }
    }

    /* Initialize the descriptor list for poll() */
    for (fd = 0, ndx = 0; fd < nfds; fd++)
    {
        int incr = 0;

        /* The readfds set holds the set of FDs that the caller can be assured
         * of reading from without blocking. Note that POLLHUP is included as
         * a readable condition. POLLHUP will be reported at end-of-file or
         * when a connection is lost. In either case, the read() can then be
         * performed without blocking.
         */
        if (readfds && FD_ISSET(fd, readfds))
        {
            pollset[ndx].fd      = fd;
            pollset[ndx].events |= POLLIN;
            incr = 1;
        }

        if (writefds && FD_ISSET(fd, writefds))
        {
            pollset[ndx].fd      = fd;
            pollset[ndx].events |= POLLOUT;
            incr = 1;
        }

        if (exceptfds && FD_ISSET(fd, exceptfds))
        {
            pollset[ndx].fd = fd;
            incr = 1;
        }

        ndx += incr;
    }

    RT_ASSERT(ndx == npfds);

    /* Convert the timeout to milliseconds */
    if (timeout)
    {
        msec = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
    }
    else
    {
        msec = -1;
    }
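
    /*
     * select() timeout semantics map directly onto poll(): a NULL timeout
     * blocks indefinitely (msec == -1), while a zeroed struct timeval
     * yields msec == 0, i.e. a non-blocking poll of the descriptors.
     */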

    /* Then let poll do all of the real work. */
    ret = poll(pollset, npfds, msec);

    /* Now set up the return values */
    if (readfds)
    {
        fdszero(readfds, nfds);
    }

    if (writefds)
    {
        fdszero(writefds, nfds);
    }

    if (exceptfds)
    {
        fdszero(exceptfds, nfds);
    }

    /* Convert the poll descriptor list back into select's three bitsets */
    if (ret > 0)
    {
        ret = 0;
        for (ndx = 0; ndx < npfds; ndx++)
        {
            /* Check for read conditions. Note that POLLHUP is included as a
             * read condition. POLLHUP will be reported when no more data will
             * be available (such as when a connection is lost). In either
             * case, the read() can then be performed without blocking.
             */
            if (readfds)
            {
                if (pollset[ndx].revents & (POLLIN | POLLHUP))
                {
                    FD_SET(pollset[ndx].fd, readfds);
                    ret++;
                }
            }

            /* Check for write conditions */
            if (writefds)
            {
                if (pollset[ndx].revents & POLLOUT)
                {
                    FD_SET(pollset[ndx].fd, writefds);
                    ret++;
                }
            }

            /* Check for exceptions */
            if (exceptfds)
            {
                if (pollset[ndx].revents & POLLERR)
                {
                    FD_SET(pollset[ndx].fd, exceptfds);
                    ret++;
                }
            }
        }
    }

    if (pollset) rt_free(pollset);

    return ret;
}
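
/*
 * Illustrative usage sketch (not part of the original implementation): a
 * minimal example of how a caller might drive this select() to wait for a
 * single descriptor to become readable. The descriptor 'fd' and the
 * one-second timeout are assumptions made for the example only.
 */
#if 0
static void select_read_example(int fd)
{
    fd_set rset;
    struct timeval tv;
    int ret;

    /* Watch 'fd' for readability only */
    FD_ZERO(&rset);
    FD_SET(fd, &rset);

    /* Give up after one second */
    tv.tv_sec  = 1;
    tv.tv_usec = 0;

    ret = select(fd + 1, &rset, RT_NULL, RT_NULL, &tv);
    if (ret > 0 && FD_ISSET(fd, &rset))
    {
        /* 'fd' is readable (or at end-of-file); read() will not block */
    }
    else if (ret == 0)
    {
        /* the timeout expired before 'fd' became readable */
    }
    else
    {
        /* select() failed (allocation failure or poll() error) */
    }
}
#endif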