git.ipfire.org Git - thirdparty/samba.git/commitdiff
merge from ronnie
author     Andrew Tridgell <tridge@samba.org>
           Tue, 10 Apr 2007 09:35:21 +0000 (19:35 +1000)
committer  Andrew Tridgell <tridge@samba.org>
           Tue, 10 Apr 2007 09:35:21 +0000 (19:35 +1000)
(This used to be ctdb commit 886a3e9122168ececc257aa1bef2f5de2f2d8d7e)

ctdb/common/ctdb_io.c

index e6269b18ab945ef508d532bc72fc9e30b877babd,7292aec2aac249426b74c69d9a1e57b7b892be14..eb1d47bae397247fc8ad5f15f83af36315d68b7a
@@@ -131,175 -111,5 +131,173 @@@ static void queue_io_read(struct ctdb_q
        }
  
        talloc_free(data_base);
 +      return;
 +
 +failed:
 +      queue->callback(NULL, 0, queue->private);
 +}
 +
 +
 +/* used when an event triggers a dead queue */
 +static void queue_dead(struct event_context *ev, struct timed_event *te, 
 +                     struct timeval t, void *private)
 +{
 +      struct ctdb_queue *queue = talloc_get_type(private, struct ctdb_queue);
 +      queue->callback(NULL, 0, queue->private);
 +}
 +
 +
 +/*
 +  called when an incoming connection is writeable
 +*/
 +static void queue_io_write(struct ctdb_queue *queue)
 +{
 +      while (queue->out_queue) {
 +              struct ctdb_queue_pkt *pkt = queue->out_queue;
 +              ssize_t n;
 +
 +              n = write(queue->fd, pkt->data, pkt->length);
 +
 +              if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
 +                      event_add_timed(queue->ctdb->ev, queue, timeval_zero(), 
 +                                      queue_dead, queue);
 +                      EVENT_FD_NOT_WRITEABLE(queue->fde);
 +                      return;
 +              }
 +              if (n <= 0) return;
 +              
 +              if (n != pkt->length) {
 +                      pkt->length -= n;
 +                      pkt->data += n;
 +                      return;
 +              }
 +
 +              DLIST_REMOVE(queue->out_queue, pkt);
 +              talloc_free(pkt);
 +      }
 +
 +      EVENT_FD_NOT_WRITEABLE(queue->fde);
 +}
 +
 +/*
 +  called when an incoming connection is readable or writeable
 +*/
 +static void queue_io_handler(struct event_context *ev, struct fd_event *fde, 
 +                           uint16_t flags, void *private)
 +{
 +      struct ctdb_queue *queue = talloc_get_type(private, struct ctdb_queue);
 +
 +      if (flags & EVENT_FD_READ) {
 +              queue_io_read(queue);
 +      } else {
 +              queue_io_write(queue);
 +      }
 +}
 +
 +
 +/*
 +  queue a packet for sending
 +*/
 +int ctdb_queue_send(struct ctdb_queue *queue, uint8_t *data, uint32_t length)
 +{
 +      struct ctdb_queue_pkt *pkt;
 +      uint32_t length2;
 +
 +      /* enforce the length and alignment rules from the tcp packet allocator */
 +      length2 = (length+(queue->alignment-1)) & ~(queue->alignment-1);
 +      *(uint32_t *)data = length2;
 +
 +      if (length2 != length) {
 +              memset(data+length, 0, length2-length);
 +      }
 +      
 +      /* if the queue is empty then try an immediate write, avoiding
 +         queue overhead. This relies on non-blocking sockets */
 +      if (queue->out_queue == NULL && queue->fd != -1) {
 +              ssize_t n = write(queue->fd, data, length2);
 +              if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
 +                      event_add_timed(queue->ctdb->ev, queue, timeval_zero(), 
 +                                      queue_dead, queue);
 +                      /* yes, we report success, as the dead node is 
 +                         handled via a separate event */
 +                      return 0;
 +              }
 +              if (n > 0) {
 +                      data += n;
 +                      length2 -= n;
 +              }
 +              if (length2 == 0) return 0;
 +      }
 +
 +      pkt = talloc(queue, struct ctdb_queue_pkt);
 +      CTDB_NO_MEMORY(queue->ctdb, pkt);
 +
 +      pkt->data = talloc_memdup(pkt, data, length2);
 +      CTDB_NO_MEMORY(queue->ctdb, pkt->data);
 +
 +      pkt->length = length2;
 +
 +      if (queue->out_queue == NULL && queue->fd != -1) {
 +              EVENT_FD_WRITEABLE(queue->fde);
 +      }
 +
 +      DLIST_ADD_END(queue->out_queue, pkt, struct ctdb_queue_pkt *);
 +
 +      return 0;
  }
  
 +
 +/*
 +  setup the fd used by the queue
 + */
 +int ctdb_queue_set_fd(struct ctdb_queue *queue, int fd)
 +{
 +      queue->fd = fd;
 +      talloc_free(queue->fde);
 +      queue->fde = NULL;
 +
 +      if (fd != -1) {
 +              queue->fde = event_add_fd(queue->ctdb->ev, queue, fd, EVENT_FD_READ, 
 +                                        queue_io_handler, queue);
 +              if (queue->fde == NULL) {
 +                      return -1;
 +              }
 +
 +              if (queue->out_queue) {
 +                      EVENT_FD_WRITEABLE(queue->fde);         
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +
 +
 +/*
 +  setup a packet queue on a socket
 + */
 +struct ctdb_queue *ctdb_queue_setup(struct ctdb_context *ctdb,
 +                                  TALLOC_CTX *mem_ctx, int fd, int alignment,
 +                                  ctdb_queue_cb_fn_t callback,
 +                                  void *private)
 +{
 +      struct ctdb_queue *queue;
 +
 +      queue = talloc_zero(mem_ctx, struct ctdb_queue);
 +      CTDB_NO_MEMORY_NULL(ctdb, queue);
 +
 +      queue->ctdb = ctdb;
 +      queue->fd = fd;
 +      queue->alignment = alignment;
 +      queue->private = private;
 +      queue->callback = callback;
 +      if (fd != -1) {
 +              if (ctdb_queue_set_fd(queue, fd) != 0) {
 +                      talloc_free(queue);
 +                      return NULL;
 +              }
 +      }
 +
 +      return queue;
 +}
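
For reference, below is a minimal caller-side sketch of how the queue API introduced in this diff (ctdb_queue_setup, ctdb_queue_send, ctdb_queue_set_fd) might be driven. It is an illustration only: the header names, the callback and its exact signature, the payload size, and the connected socket are assumptions, not part of this commit.

/* Hypothetical usage sketch -- not part of the commit above.
   Assumes the declarations for ctdb_queue_setup()/ctdb_queue_send()
   are visible (e.g. via ctdb's private headers). */

#include <stdint.h>
#include <string.h>

/* Assumed callback signature, matching how queue_io_read()/queue_dead()
   invoke queue->callback(data, length, private): data == NULL signals a
   dead connection. */
static void example_recv(uint8_t *data, uint32_t length, void *private)
{
	if (data == NULL) {
		/* connection died; clean up the per-connection state */
		return;
	}
	/* handle one complete packet; the first 4 bytes carry the
	   padded length word written by ctdb_queue_send() */
}

static int example_send(struct ctdb_context *ctdb, int connected_fd)
{
	struct ctdb_queue *queue;
	uint8_t buf[sizeof(uint32_t) + 16];	/* length word + payload */

	/* alignment of 4 matches the rounding done in ctdb_queue_send();
	   fd may also be passed as -1 and attached later with
	   ctdb_queue_set_fd() once the socket is connected */
	queue = ctdb_queue_setup(ctdb, ctdb, connected_fd, 4,
				 example_recv, NULL);
	if (queue == NULL) {
		return -1;
	}

	memset(buf, 0, sizeof(buf));
	/* ctdb_queue_send() overwrites the first 4 bytes with the
	   alignment-padded length and zero-fills any padding */
	return ctdb_queue_send(queue, buf, sizeof(buf));
}

Note the effect of the immediate-write path in ctdb_queue_send(): when the out_queue is empty and the socket accepts the whole packet, no allocation or event round-trip happens at all; only the unsent tail of a packet is copied into a ctdb_queue_pkt and flushed later by queue_io_write().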