glib-2.79.0/gio/inotify/inotify-kernel.c
/*
   Copyright (C) 2005 John McCutchan
   Copyright © 2015 Canonical Limited

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this library; if not, see <http://www.gnu.org/licenses/>.

   Authors:
     Ryan Lortie <desrt@desrt.ca>
     John McCutchan <john@johnmccutchan.com>
*/

#include "config.h"

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <glib.h>
#include "inotify-kernel.h"
#include <sys/inotify.h>
#ifdef HAVE_SYS_FILIO_H
#include <sys/filio.h>
#endif
#include <glib/glib-unix.h>

#include "glib-private.h"

/* From inotify(7) */
#define MAX_EVENT_SIZE       (sizeof(struct inotify_event) + NAME_MAX + 1)
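
/* For reference, the layout of each event as documented in inotify(7):
 *
 *   struct inotify_event {
 *     int      wd;       // watch descriptor
 *     uint32_t mask;     // event bits (IN_MOVED_FROM, IN_MOVED_TO, ...)
 *     uint32_t cookie;   // links the two halves of a rename
 *     uint32_t len;      // size of the 'name' field, including padding
 *     char     name[];   // optional nul-terminated name
 *   };
 *
 * MAX_EVENT_SIZE above is therefore the largest possible single event:
 * the fixed header plus a name of NAME_MAX bytes plus its nul terminator.
 */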

/* Amount of time to sleep on receipt of uninteresting events */
#define BOREDOM_SLEEP_TIME   (100 * G_TIME_SPAN_MILLISECOND)

/* Limits on the maximum amount of time and the maximum number of
 * intervening events between a FROM/TO pair that we are still willing
 * to merge (see the example below).
 */
#define MOVE_PAIR_DELAY      (10 * G_TIME_SPAN_MILLISECOND)
#define MOVE_PAIR_DISTANCE   (100)
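
/* Illustrative only: a rename() of "a" to "b" inside a watched directory
 * produces two events that share a cookie, roughly
 *
 *   IN_MOVED_FROM  cookie=42  name="a"
 *   IN_MOVED_TO    cookie=42  name="b"
 *
 * We hold the IN_MOVED_FROM back and merge it with its IN_MOVED_TO only if
 * the second half arrives within MOVE_PAIR_DELAY and with no more than
 * roughly MOVE_PAIR_DISTANCE other events queued in between; otherwise it
 * is delivered on its own.
 */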

/* We use the lock from inotify-helper.c
 *
 * We only need to take it in our read callback.
 *
 * The rest of the locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);

static ik_event_t *
ik_event_new (struct inotify_event *kevent,
              gint64                now)
{
  ik_event_t *event = g_new0 (ik_event_t, 1);

  event->wd = kevent->wd;
  event->mask = kevent->mask;
  event->cookie = kevent->cookie;
  event->len = kevent->len;
  event->timestamp = now;
  if (event->len)
    event->name = g_strdup (kevent->name);
  else
    event->name = NULL;

  return event;
}

void
_ik_event_free (ik_event_t *event)
{
  if (event->pair)
    {
      event->pair->pair = NULL;
      _ik_event_free (event->pair);
    }

  g_free (event->name);
  g_free (event);
}

typedef struct
{
  GSource     source;

  GQueue      queue;
  gpointer    fd_tag;
  gint        fd;

  GHashTable *unmatched_moves;
  gboolean    is_bored;
} InotifyKernelSource;

static InotifyKernelSource *inotify_source;

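/* Returns the monotonic time at which the event at the head of the queue
 * becomes deliverable: -1 if the queue is empty (ie: never), 0 if it can
 * go out right now, or a time in the future if we are still holding an
 * unpaired IN_MOVED_FROM in the hope that its IN_MOVED_TO will arrive.
 */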
static gint64
ik_source_get_dispatch_time (InotifyKernelSource *iks)
{
  ik_event_t *head;

  head = g_queue_peek_head (&iks->queue);

  /* nothing in the queue: not ready */
  if (!head)
    return -1;

  /* if it's not an unpaired move, it is ready now */
  if (~head->mask & IN_MOVED_FROM || head->pair)
    return 0;

  /* if the queue is too long then it's ready now */
  if (iks->queue.length > MOVE_PAIR_DISTANCE)
    return 0;

  /* otherwise, it's ready after the delay */
  return head->timestamp + MOVE_PAIR_DELAY;
}

static gboolean
ik_source_can_dispatch_now (InotifyKernelSource *iks,
                            gint64               now)
{
  gint64 dispatch_time;

  dispatch_time = ik_source_get_dispatch_time (iks);

  return 0 <= dispatch_time && dispatch_time <= now;
}

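/* Performs a single read() from the inotify fd into 'buffer'.  Retries on
 * EINTR, returns 0 if there is nothing to read (EAGAIN), and aborts via
 * g_error() on any other failure or on end-of-file.
 */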
static gsize
ik_source_read_some_events (InotifyKernelSource *iks,
                            gchar               *buffer,
                            gsize                buffer_len)
{
  gssize result;
  int errsv;

again:
  result = read (iks->fd, buffer, buffer_len);
  errsv = errno;

  if (result < 0)
    {
      if (errsv == EINTR)
        goto again;

      if (errsv == EAGAIN)
        return 0;

      g_error ("inotify read(): %s", g_strerror (errsv));
    }
  else if (result == 0)
    g_error ("inotify unexpectedly hit eof");

  return result;
}

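/* Reads everything that is currently pending on the inotify fd.  The first
 * read goes into the caller-supplied buffer; if that might have been too
 * small to hold everything, the kernel is asked (via FIONREAD) how much is
 * left and the lot is copied into a freshly-allocated heap buffer.  Returns
 * either 'buffer' or that heap buffer, with *length_out set to the number
 * of bytes of event data it contains.
 */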
static gchar *
ik_source_read_all_the_events (InotifyKernelSource *iks,
                               gchar               *buffer,
                               gsize                buffer_len,
                               gsize               *length_out)
{
  gsize n_read;

  n_read = ik_source_read_some_events (iks, buffer, buffer_len);

  /* Check if we might have gotten another event if we had passed in a
   * bigger buffer...
   */
  if (n_read + MAX_EVENT_SIZE > buffer_len)
    {
      gchar *new_buffer;
      guint n_readable;
      gint result;
      int errsv;

      /* figure out how many more bytes there are to read */
      result = ioctl (iks->fd, FIONREAD, &n_readable);
      errsv = errno;
      if (result != 0)
        g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errsv));

      if (n_readable != 0)
        {
          /* there is in fact more data.  allocate a new buffer, copy
           * the existing data, and then append the remaining.
           */
          new_buffer = g_malloc (n_read + n_readable);
          memcpy (new_buffer, buffer, n_read);
          n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);

          buffer = new_buffer;

          /* There may be new events in the buffer that were added after
           * the FIONREAD was performed, but we can't risk getting into
           * a loop.  We'll get them next time.
           */
        }
    }

  *length_out = n_read;

  return buffer;
}

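/* The dispatch function runs in the GLib worker thread whenever the source
 * becomes ready.  It does three things: drains the fd and turns the raw
 * kernel events into ik_event_t structures (pairing rename halves via their
 * cookie), delivers every event that is ready to the user callback while
 * holding inotify_lock, and finally decides what should wake us up next:
 * the fd itself, the expiry of an unpaired move, or the boredom timeout.
 */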
static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  func,
                    gpointer     user_data)
{
  InotifyKernelSource *iks = (InotifyKernelSource *) source;
  gboolean (*user_callback) (ik_event_t *event) = (void *) func;
  gboolean interesting = FALSE;
  gint64 now;

  now = g_source_get_time (source);

  if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
    {
      gchar stack_buffer[4096];
      gsize buffer_len;
      gchar *buffer;
      gsize offset;

      /* We want to read all of the available events.
       *
       * We need to do it in a finite number of steps so that we don't
       * get caught in a loop of read() with another process
       * continuously adding events each time we drain them.
       *
       * In the normal case we will have only a few events in the queue,
       * so start out by reading into a small stack-allocated buffer.
       * Even though we're on a fresh stack frame, there is no need to
       * pointlessly blow up the size of the worker thread's stack with a
       * huge buffer here.
       *
       * If the result is large enough to cause us to suspect that
       * another event may be pending then we allocate a buffer on the
       * heap that can hold all of the events and read (once!) into that
       * buffer.
       */
      buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);

      offset = 0;

      while (offset < buffer_len)
        {
          struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
          ik_event_t *event;

          event = ik_event_new (kevent, now);

          offset += sizeof (struct inotify_event) + event->len;

          if (event->mask & IN_MOVED_TO)
            {
              ik_event_t *pair;

              pair = g_hash_table_lookup (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
              if (pair != NULL)
                {
                  g_assert (!pair->pair);

                  g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
                  event->is_second_in_pair = TRUE;
                  event->pair = pair;
                  pair->pair = event;
                  continue;
                }

              interesting = TRUE;
            }

          else if (event->mask & IN_MOVED_FROM)
            {
              gboolean new;

              new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
              if G_UNLIKELY (!new)
                g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

              interesting = TRUE;
            }

          g_queue_push_tail (&iks->queue, event);
        }

      if (buffer_len == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }

      if (buffer != stack_buffer)
        g_free (buffer);
    }

  while (ik_source_can_dispatch_now (iks, now))
    {
      ik_event_t *event;

      /* callback will free the event */
      event = g_queue_pop_head (&iks->queue);

      if (event->mask & IN_MOVED_FROM && !event->pair)
        g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));

      G_LOCK (inotify_lock);

      interesting |= (* user_callback) (event);

      G_UNLOCK (inotify_lock);
    }

  /* The queue gets blocked iff we have unmatched moves */
  g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));

  /* Here's where we decide what will wake us up next.
   *
   * If the last event was interesting then we will wake up on the fd or
   * when the timeout is reached on an unpaired move (if any).
   *
   * If the last event was uninteresting then we will wake up after the
   * shorter of the boredom sleep or any timeout for an unpaired move.
   */
  if (interesting)
    {
      if (iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
          iks->is_bored = FALSE;
        }

      g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
    }
  else
    {
      guint64 dispatch_time = ik_source_get_dispatch_time (iks);
      guint64 boredom_time = now + BOREDOM_SLEEP_TIME;

      if (!iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, 0);
          iks->is_bored = TRUE;
        }

      g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
    }

  return TRUE;
}

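/* Creates the singleton InotifyKernelSource.  We prefer inotify_init1()
 * so that the fd is created close-on-exec and non-blocking atomically;
 * if that fails (e.g. on an old kernel) we fall back to inotify_init()
 * and set the non-blocking flag ourselves.  The source is attached to the
 * GLib worker thread's context, so ik_source_dispatch() never runs on the
 * caller's thread.
 */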
static InotifyKernelSource *
ik_source_new (gboolean (* callback) (ik_event_t *event))
{
  static GSourceFuncs source_funcs = {
    NULL, NULL,
    ik_source_dispatch,
    NULL, NULL, NULL
  };
  InotifyKernelSource *iks;
  GSource *source;
  gboolean should_set_nonblock = FALSE;

  source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
  iks = (InotifyKernelSource *) source;

  g_source_set_static_name (source, "inotify kernel source");

  iks->unmatched_moves = g_hash_table_new (NULL, NULL);
  iks->fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);

  if (iks->fd < 0)
    {
      should_set_nonblock = TRUE;
      iks->fd = inotify_init ();
    }

  if (iks->fd >= 0)
    {
      GError *error = NULL;

      if (should_set_nonblock)
        {
          g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
          g_assert_no_error (error);
        }

      iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
    }

  g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);

  g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());

  return iks;
}

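/* Starts the inotify machinery, creating the source exactly once (the
 * g_once_init_enter_pointer/leave_pointer pair makes concurrent callers
 * safe).  Returns TRUE if an inotify fd could actually be opened; the
 * callback passed by the first caller is the one that gets used.
 */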
gboolean
_ik_startup (gboolean (*cb)(ik_event_t *event))
{
  if (g_once_init_enter_pointer (&inotify_source))
    g_once_init_leave_pointer (&inotify_source, ik_source_new (cb));

  return inotify_source->fd >= 0;
}

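/* Adds a watch on 'path' for the events in 'mask' and returns the new
 * watch descriptor, or a negative value on failure.  On failure, errno is
 * stored in *err if 'err' is non-NULL.  _ik_startup() must have succeeded
 * before this is called.
 */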
gint32
_ik_watch (const char *path,
           guint32     mask,
           int        *err)
{
  gint32 wd = -1;

  g_assert (path != NULL);
  g_assert (inotify_source && inotify_source->fd >= 0);

  wd = inotify_add_watch (inotify_source->fd, path, mask);

  if (wd < 0)
    {
      int e = errno;
      /* FIXME: debug msg failed to add watch */
      if (err)
        *err = e;
      return wd;
    }

  g_assert (wd >= 0);
  return wd;
}

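/* Removes the watch identified by 'wd' (as returned by _ik_watch()).
 * Returns 0 on success and -1 on failure.  The 'path' argument is not
 * used here.
 */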
int
_ik_ignore (const char *path,
            gint32      wd)
{
  g_assert (wd >= 0);
  g_assert (inotify_source && inotify_source->fd >= 0);

  if (inotify_rm_watch (inotify_source->fd, wd) < 0)
    {
      /* int e = errno; */
      /* failed to rm watch */
      return -1;
    }

  return 0;
}
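
/* Illustrative only (not part of this file): the rough shape of how a
 * consumer such as inotify-helper.c is expected to drive this API.  The
 * callback runs in the GLib worker thread with inotify_lock held, takes
 * ownership of the event (freeing it with _ik_event_free()) and returns
 * whether it was "interesting".  The path and mask below are hypothetical:
 *
 *   static gboolean
 *   event_callback (ik_event_t *event)
 *   {
 *     // hand the event to higher layers, then release it
 *     _ik_event_free (event);
 *     return TRUE;
 *   }
 *
 *   if (_ik_startup (event_callback))
 *     {
 *       int err = 0;
 *       gint32 wd = _ik_watch ("/some/dir",
 *                              IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO,
 *                              &err);
 *
 *       if (wd >= 0)
 *         _ik_ignore ("/some/dir", wd);
 *     }
 */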