xz-5.4.5/src/liblzma/common/stream_decoder_mt.c
       1  ///////////////////////////////////////////////////////////////////////////////
       2  //
       3  /// \file       stream_decoder_mt.c
       4  /// \brief      Multithreaded .xz Stream decoder
       5  //
       6  //  Authors:    Sebastian Andrzej Siewior
       7  //              Lasse Collin
       8  //
       9  //  This file has been put into the public domain.
      10  //  You can do whatever you want with this file.
      11  //
      12  ///////////////////////////////////////////////////////////////////////////////
      13  
      14  #include "common.h"
      15  #include "block_decoder.h"
      16  #include "stream_decoder.h"
      17  #include "index.h"
      18  #include "outqueue.h"
      19  
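           // A minimal usage sketch (not part of the original file) showing how
           // an application reaches this decoder through the public liblzma API.
           // The field values below are illustrative assumptions, not defaults:
           //
           //     lzma_stream strm = LZMA_STREAM_INIT;
           //     lzma_mt mt = {
           //         .flags = LZMA_CONCATENATED,
           //         .threads = 4,
           //         .timeout = 300,                   // milliseconds
           //         .memlimit_threading = 256 << 20,  // soft limit
           //         .memlimit_stop = UINT64_MAX,      // hard limit
           //     };
           //     if (lzma_stream_decoder_mt(&strm, &mt) == LZMA_OK) {
           //         // Call lzma_code(&strm, LZMA_RUN) with input, and finally
           //         // lzma_code(&strm, LZMA_FINISH) until it returns
           //         // LZMA_STREAM_END; then clean up with lzma_end(&strm).
           //     }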
      20  
      21  typedef enum {
      22  	/// Waiting for work.
      23  	/// Main thread may change this to THR_RUN or THR_EXIT.
      24  	THR_IDLE,
      25  
      26  	/// Decoding is in progress.
      27  	/// Main thread may change this to THR_STOP or THR_EXIT.
      28  	/// The worker thread may change this to THR_IDLE.
      29  	THR_RUN,
      30  
      31  	/// The main thread wants the thread to stop whatever it was doing
      32  	/// but not exit. Main thread may change this to THR_EXIT.
      33  	/// The worker thread may change this to THR_IDLE.
      34  	THR_STOP,
      35  
      36  	/// The main thread wants the thread to exit.
      37  	THR_EXIT,
      38  
      39  } worker_state;
      40  
      41  
      42  typedef enum {
      43  	/// Partial updates (storing of worker thread progress
      44  	/// to lzma_outbuf) are disabled.
      45  	PARTIAL_DISABLED,
      46  
      47  	/// Main thread requests partial updates to be enabled but
      48  	/// no partial update has been done by the worker thread yet.
      49  	///
      50  	/// Changing from PARTIAL_DISABLED to PARTIAL_START requires
      51  	/// use of the worker-thread mutex. Other transitions don't
      52  	/// need a mutex.
      53  	PARTIAL_START,
      54  
      55  	/// Partial updates are enabled and the worker thread has done
      56  	/// at least one partial update.
      57  	PARTIAL_ENABLED,
      58  
      59  } partial_update_mode;
      60  
      61  
      62  struct worker_thread {
      63  	/// Worker state is protected with our mutex.
      64  	worker_state state;
      65  
      66  	/// Input buffer that will contain the whole Block except Block Header.
      67  	uint8_t *in;
      68  
      69  	/// Amount of memory allocated for "in"
      70  	size_t in_size;
      71  
      72  	/// Number of bytes written to "in" by the main thread
      73  	size_t in_filled;
      74  
      75  	/// Number of bytes consumed from "in" by the worker thread.
      76  	size_t in_pos;
      77  
      78  	/// Amount of uncompressed data that has been decoded. This local
      79  	/// copy is needed because updating outbuf->pos requires locking
      80  	/// the main mutex (coder->mutex).
      81  	size_t out_pos;
      82  
      83  	/// Pointer to the main structure is needed to (1) lock the main
      84  	/// mutex (coder->mutex) when updating outbuf->pos and (2) when
      85  	/// putting this thread back to the stack of free threads.
      86  	struct lzma_stream_coder *coder;
      87  
      88  	/// The allocator is set by the main thread. Since a copy of the
      89  	/// pointer is kept here, the application must not change the
      90  	/// allocator before calling lzma_end().
      91  	const lzma_allocator *allocator;
      92  
      93  	/// Output queue buffer to which the uncompressed data is written.
      94  	lzma_outbuf *outbuf;
      95  
      96  	/// Amount of compressed data that has already been decompressed.
      97  	/// This is updated from in_pos when our mutex is locked.
      98  	/// This is size_t, not uint64_t, because per-thread progress
      99  	/// is limited to sizes of allocated buffers.
     100  	size_t progress_in;
     101  
     102  	/// Like progress_in but for uncompressed data.
     103  	size_t progress_out;
     104  
     105  	/// Updating outbuf->pos requires locking the main mutex
     106  	/// (coder->mutex). Since the main thread will only read output
     107  	/// from the oldest outbuf in the queue, only the worker thread
     108  	/// that is associated with the oldest outbuf needs to update its
     109  	/// outbuf->pos. This avoids useless mutex contention that would
     110  	/// happen if all worker threads were frequently locking the main
     111  	/// mutex to update their outbuf->pos.
     112  	///
      113  	/// Only when partial_update is something other than
      114  	/// PARTIAL_DISABLED will this worker thread update outbuf->pos
      115  	/// after each call to the Block decoder.
     116  	partial_update_mode partial_update;
     117  
     118  	/// Block decoder
     119  	lzma_next_coder block_decoder;
     120  
     121  	/// Thread-specific Block options are needed because the Block
     122  	/// decoder modifies the struct given to it at initialization.
     123  	lzma_block block_options;
     124  
     125  	/// Filter chain memory usage
     126  	uint64_t mem_filters;
     127  
     128  	/// Next structure in the stack of free worker threads.
     129  	struct worker_thread *next;
     130  
     131  	mythread_mutex mutex;
     132  	mythread_cond cond;
     133  
     134  	/// The ID of this thread is used to join the thread
     135  	/// when it's not needed anymore.
     136  	mythread thread_id;
     137  };
     138  
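           // Locking summary collected from the comments above: "state" and the
           // PARTIAL_DISABLED -> PARTIAL_START transition are protected by the
           // per-thread mutex (thr->mutex), while outbuf->pos, decoder_in_pos,
           // and the progress counters that get folded into lzma_stream_coder
           // are updated under the main coder->mutex.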
     139  
     140  struct lzma_stream_coder {
     141  	enum {
     142  		SEQ_STREAM_HEADER,
     143  		SEQ_BLOCK_HEADER,
     144  		SEQ_BLOCK_INIT,
     145  		SEQ_BLOCK_THR_INIT,
     146  		SEQ_BLOCK_THR_RUN,
     147  		SEQ_BLOCK_DIRECT_INIT,
     148  		SEQ_BLOCK_DIRECT_RUN,
     149  		SEQ_INDEX_WAIT_OUTPUT,
     150  		SEQ_INDEX_DECODE,
     151  		SEQ_STREAM_FOOTER,
     152  		SEQ_STREAM_PADDING,
     153  		SEQ_ERROR,
     154  	} sequence;
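
           	// The SEQ_* values follow the layout of a .xz Stream: Stream
           	// Header, then zero or more Blocks (each decoded either by a
           	// worker thread or directly in single-threaded mode), then the
           	// Index, Stream Footer, and optional Stream Padding. SEQ_ERROR
           	// is entered after a failure so that output decoded before the
           	// error can still be drained to the application (unless
           	// fail_fast is set).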
     155  
     156  	/// Block decoder
     157  	lzma_next_coder block_decoder;
     158  
     159  	/// Every Block Header will be decoded into this structure.
     160  	/// This is also used to initialize a Block decoder when in
     161  	/// direct mode. In threaded mode, a thread-specific copy will
     162  	/// be made for decoder initialization because the Block decoder
     163  	/// will modify the structure given to it.
     164  	lzma_block block_options;
     165  
     166  	/// Buffer to hold a filter chain for Block Header decoding and
     167  	/// initialization. These are freed after successful Block decoder
     168  	/// initialization or at stream_decoder_mt_end(). The thread-specific
     169  	/// copy of block_options won't hold a pointer to filters[] after
     170  	/// initialization.
     171  	lzma_filter filters[LZMA_FILTERS_MAX + 1];
     172  
     173  	/// Stream Flags from Stream Header
     174  	lzma_stream_flags stream_flags;
     175  
     176  	/// Index is hashed so that it can be compared to the sizes of Blocks
     177  	/// with O(1) memory usage.
     178  	lzma_index_hash *index_hash;
     179  
     180  
      181  	/// Maximum wait time if we cannot use all the input and cannot
     182  	/// fill the output buffer. This is in milliseconds.
     183  	uint32_t timeout;
     184  
     185  
     186  	/// Error code from a worker thread.
     187  	///
     188  	/// \note       Use mutex.
     189  	lzma_ret thread_error;
     190  
     191  	/// Error code to return after pending output has been copied out. If
     192  	/// set in read_output_and_wait(), this is a mirror of thread_error.
      193  	/// If set in stream_decode_mt() then it is, for example, an error
      194  	/// that occurred when decoding a Block Header.
     195  	lzma_ret pending_error;
     196  
     197  	/// Number of threads that will be created at maximum.
     198  	uint32_t threads_max;
     199  
     200  	/// Number of thread structures that have been initialized from
     201  	/// "threads", and thus the number of worker threads actually
     202  	/// created so far.
     203  	uint32_t threads_initialized;
     204  
     205  	/// Array of allocated thread-specific structures. When no threads
     206  	/// are in use (direct mode) this is NULL. In threaded mode this
     207  	/// points to an array of threads_max number of worker_thread structs.
     208  	struct worker_thread *threads;
     209  
     210  	/// Stack of free threads. When a thread finishes, it puts itself
     211  	/// back into this stack. This starts as empty because threads
     212  	/// are created only when actually needed.
     213  	///
     214  	/// \note       Use mutex.
     215  	struct worker_thread *threads_free;
     216  
     217  	/// The most recent worker thread to which the main thread writes
     218  	/// the new input from the application.
     219  	struct worker_thread *thr;
     220  
     221  	/// Output buffer queue for decompressed data from the worker threads
     222  	///
     223  	/// \note       Use mutex with operations that need it.
     224  	lzma_outq outq;
     225  
     226  	mythread_mutex mutex;
     227  	mythread_cond cond;
     228  
     229  
     230  	/// Memory usage that will not be exceeded in multi-threaded mode.
     231  	/// Single-threaded mode can exceed this even by a large amount.
     232  	uint64_t memlimit_threading;
     233  
     234  	/// Memory usage limit that should never be exceeded.
     235  	/// LZMA_MEMLIMIT_ERROR will be returned if decoding isn't possible
     236  	/// even in single-threaded mode without exceeding this limit.
     237  	uint64_t memlimit_stop;
     238  
     239  	/// Amount of memory in use by the direct mode decoder
     240  	/// (coder->block_decoder). In threaded mode this is 0.
     241  	uint64_t mem_direct_mode;
     242  
     243  	/// Amount of memory needed by the running worker threads.
     244  	/// This doesn't include the memory needed by the output buffer.
     245  	///
     246  	/// \note       Use mutex.
     247  	uint64_t mem_in_use;
     248  
     249  	/// Amount of memory used by the idle (cached) threads.
     250  	///
     251  	/// \note       Use mutex.
     252  	uint64_t mem_cached;
     253  
     254  
     255  	/// Amount of memory needed for the filter chain of the next Block.
     256  	uint64_t mem_next_filters;
     257  
     258  	/// Amount of memory needed for the thread-specific input buffer
     259  	/// for the next Block.
     260  	uint64_t mem_next_in;
     261  
     262  	/// Amount of memory actually needed to decode the next Block
     263  	/// in threaded mode. This is
     264  	/// mem_next_filters + mem_next_in + memory needed for lzma_outbuf.
     265  	uint64_t mem_next_block;
     266  
     267  
     268  	/// Amount of compressed data in Stream Header + Blocks that have
     269  	/// already been finished.
     270  	///
     271  	/// \note       Use mutex.
     272  	uint64_t progress_in;
     273  
     274  	/// Amount of uncompressed data in Blocks that have already
     275  	/// been finished.
     276  	///
     277  	/// \note       Use mutex.
     278  	uint64_t progress_out;
     279  
     280  
     281  	/// If true, LZMA_NO_CHECK is returned if the Stream has
     282  	/// no integrity check.
     283  	bool tell_no_check;
     284  
     285  	/// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
     286  	/// an integrity check that isn't supported by this liblzma build.
     287  	bool tell_unsupported_check;
     288  
     289  	/// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
     290  	bool tell_any_check;
     291  
     292  	/// If true, we will tell the Block decoder to skip calculating
     293  	/// and verifying the integrity check.
     294  	bool ignore_check;
     295  
     296  	/// If true, we will decode concatenated Streams that possibly have
     297  	/// Stream Padding between or after them. LZMA_STREAM_END is returned
     298  	/// once the application isn't giving us any new input (LZMA_FINISH),
     299  	/// and we aren't in the middle of a Stream, and possible
     300  	/// Stream Padding is a multiple of four bytes.
     301  	bool concatenated;
     302  
     303  	/// If true, we will return any errors immediately instead of first
     304  	/// producing all output before the location of the error.
     305  	bool fail_fast;
     306  
     307  
     308  	/// When decoding concatenated Streams, this is true as long as we
     309  	/// are decoding the first Stream. This is needed to avoid misleading
     310  	/// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
     311  	/// bytes.
     312  	bool first_stream;
     313  
     314  	/// This is used to track if the previous call to stream_decode_mt()
     315  	/// had output space (*out_pos < out_size) and managed to fill the
     316  	/// output buffer (*out_pos == out_size). This may be set to true
     317  	/// in read_output_and_wait(). This is read and then reset to false
     318  	/// at the beginning of stream_decode_mt().
     319  	///
     320  	/// This is needed to support applications that call lzma_code() in
     321  	/// such a way that more input is provided only when lzma_code()
     322  	/// didn't fill the output buffer completely. Basically, this makes
     323  	/// it easier to convert such applications from single-threaded
     324  	/// decoder to multi-threaded decoder.
     325  	bool out_was_filled;
     326  
     327  	/// Write position in buffer[] and position in Stream Padding
     328  	size_t pos;
     329  
     330  	/// Buffer to hold Stream Header, Block Header, and Stream Footer.
     331  	/// Block Header has biggest maximum size.
     332  	uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
     333  };
     334  
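           // Memory accounting overview based on the fields above: in threaded
           // mode the decoder keeps mem_in_use + mem_cached plus the output
           // queue's memory within memlimit_threading, freeing cached buffers
           // and Block decoders or falling back to single-threaded direct mode
           // when a Block wouldn't fit. memlimit_stop is the hard limit: if even
           // direct mode would need more than that, LZMA_MEMLIMIT_ERROR is
           // returned.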
     335  
     336  /// Enables updating of outbuf->pos. This is a callback function that is
     337  /// used with lzma_outq_enable_partial_output().
     338  static void
     339  worker_enable_partial_update(void *thr_ptr)
     340  {
     341  	struct worker_thread *thr = thr_ptr;
     342  
     343  	mythread_sync(thr->mutex) {
     344  		thr->partial_update = PARTIAL_START;
     345  		mythread_cond_signal(&thr->cond);
     346  	}
     347  }
     348  
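           // This callback is handed to lzma_outq_enable_partial_output() in
           // read_output_and_wait() below, once the previous Block's output has
           // been fully read from the queue. That way only the worker thread
           // that owns the oldest outbuf starts taking coder->mutex to publish
           // its progress.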
     349  
      350  /// Things to do at THR_STOP or when finishing a Block.
      351  /// This is called with thr->coder->mutex locked.
     352  static void
     353  worker_stop(struct worker_thread *thr)
     354  {
     355  	// Update memory usage counters.
     356  	thr->coder->mem_in_use -= thr->in_size;
     357  	thr->in_size = 0; // thr->in was freed above.
     358  
     359  	thr->coder->mem_in_use -= thr->mem_filters;
     360  	thr->coder->mem_cached += thr->mem_filters;
     361  
     362  	// Put this thread to the stack of free threads.
     363  	thr->next = thr->coder->threads_free;
     364  	thr->coder->threads_free = thr;
     365  
     366  	mythread_cond_signal(&thr->coder->cond);
     367  	return;
     368  }
     369  
     370  
     371  static MYTHREAD_RET_TYPE
     372  worker_decoder(void *thr_ptr)
     373  {
     374  	struct worker_thread *thr = thr_ptr;
     375  	size_t in_filled;
     376  	partial_update_mode partial_update;
     377  	lzma_ret ret;
     378  
     379  next_loop_lock:
     380  
     381  	mythread_mutex_lock(&thr->mutex);
     382  next_loop_unlocked:
     383  
     384  	if (thr->state == THR_IDLE) {
     385  		mythread_cond_wait(&thr->cond, &thr->mutex);
     386  		goto next_loop_unlocked;
     387  	}
     388  
     389  	if (thr->state == THR_EXIT) {
     390  		mythread_mutex_unlock(&thr->mutex);
     391  
     392  		lzma_free(thr->in, thr->allocator);
     393  		lzma_next_end(&thr->block_decoder, thr->allocator);
     394  
     395  		mythread_mutex_destroy(&thr->mutex);
     396  		mythread_cond_destroy(&thr->cond);
     397  
     398  		return MYTHREAD_RET_VALUE;
     399  	}
     400  
     401  	if (thr->state == THR_STOP) {
     402  		thr->state = THR_IDLE;
     403  		mythread_mutex_unlock(&thr->mutex);
     404  
     405  		mythread_sync(thr->coder->mutex) {
     406  			worker_stop(thr);
     407  		}
     408  
     409  		goto next_loop_lock;
     410  	}
     411  
     412  	assert(thr->state == THR_RUN);
     413  
     414  	// Update progress info for get_progress().
     415  	thr->progress_in = thr->in_pos;
     416  	thr->progress_out = thr->out_pos;
     417  
     418  	// If we don't have any new input, wait for a signal from the main
     419  	// thread except if partial output has just been enabled. In that
     420  	// case we will do one normal run so that the partial output info
     421  	// gets passed to the main thread. The call to block_decoder.code()
     422  	// is useless but harmless as it can occur only once per Block.
     423  	in_filled = thr->in_filled;
     424  	partial_update = thr->partial_update;
     425  
     426  	if (in_filled == thr->in_pos && partial_update != PARTIAL_START) {
     427  		mythread_cond_wait(&thr->cond, &thr->mutex);
     428  		goto next_loop_unlocked;
     429  	}
     430  
     431  	mythread_mutex_unlock(&thr->mutex);
     432  
     433  	// Pass the input in small chunks to the Block decoder.
     434  	// This way we react reasonably fast if we are told to stop/exit,
     435  	// and (when partial update is enabled) we tell about our progress
     436  	// to the main thread frequently enough.
     437  	const size_t chunk_size = 16384;
     438  	if ((in_filled - thr->in_pos) > chunk_size)
     439  		in_filled = thr->in_pos + chunk_size;
     440  
     441  	ret = thr->block_decoder.code(
     442  			thr->block_decoder.coder, thr->allocator,
     443  			thr->in, &thr->in_pos, in_filled,
     444  			thr->outbuf->buf, &thr->out_pos,
     445  			thr->outbuf->allocated, LZMA_RUN);
     446  
     447  	if (ret == LZMA_OK) {
     448  		if (partial_update != PARTIAL_DISABLED) {
     449  			// The main thread uses thr->mutex to change from
     450  			// PARTIAL_DISABLED to PARTIAL_START. The main thread
     451  			// doesn't care about this variable after that so we
     452  			// can safely change it here to PARTIAL_ENABLED
     453  			// without a mutex.
     454  			thr->partial_update = PARTIAL_ENABLED;
     455  
     456  			// The main thread is reading decompressed data
     457  			// from thr->outbuf. Tell the main thread about
     458  			// our progress.
     459  			//
     460  			// NOTE: It's possible that we consumed input without
     461  			// producing any new output so it's possible that
     462  			// only in_pos has changed. In case of PARTIAL_START
     463  			// it is possible that neither in_pos nor out_pos has
     464  			// changed.
     465  			mythread_sync(thr->coder->mutex) {
     466  				thr->outbuf->pos = thr->out_pos;
     467  				thr->outbuf->decoder_in_pos = thr->in_pos;
     468  				mythread_cond_signal(&thr->coder->cond);
     469  			}
     470  		}
     471  
     472  		goto next_loop_lock;
     473  	}
     474  
     475  	// Either we finished successfully (LZMA_STREAM_END) or an error
     476  	// occurred. Both cases are handled almost identically. The error
     477  	// case requires updating thr->coder->thread_error.
     478  	//
     479  	// The sizes are in the Block Header and the Block decoder
     480  	// checks that they match, thus we know these:
     481  	assert(ret != LZMA_STREAM_END || thr->in_pos == thr->in_size);
     482  	assert(ret != LZMA_STREAM_END
     483  		|| thr->out_pos == thr->block_options.uncompressed_size);
     484  
     485  	// Free the input buffer. Don't update in_size as we need
     486  	// it later to update thr->coder->mem_in_use.
     487  	lzma_free(thr->in, thr->allocator);
     488  	thr->in = NULL;
     489  
     490  	mythread_sync(thr->mutex) {
     491  		if (thr->state != THR_EXIT)
     492  			thr->state = THR_IDLE;
     493  	}
     494  
     495  	mythread_sync(thr->coder->mutex) {
     496  		// Move our progress info to the main thread.
     497  		thr->coder->progress_in += thr->in_pos;
     498  		thr->coder->progress_out += thr->out_pos;
     499  		thr->progress_in = 0;
     500  		thr->progress_out = 0;
     501  
     502  		// Mark the outbuf as finished.
     503  		thr->outbuf->pos = thr->out_pos;
     504  		thr->outbuf->decoder_in_pos = thr->in_pos;
     505  		thr->outbuf->finished = true;
     506  		thr->outbuf->finish_ret = ret;
     507  		thr->outbuf = NULL;
     508  
     509  		// If an error occurred, tell it to the main thread.
     510  		if (ret != LZMA_STREAM_END
     511  				&& thr->coder->thread_error == LZMA_OK)
     512  			thr->coder->thread_error = ret;
     513  
     514  		worker_stop(thr);
     515  	}
     516  
     517  	goto next_loop_lock;
     518  }
     519  
     520  
     521  /// Tells the worker threads to exit and waits for them to terminate.
     522  static void
     523  threads_end(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
     524  {
     525  	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
     526  		mythread_sync(coder->threads[i].mutex) {
     527  			coder->threads[i].state = THR_EXIT;
     528  			mythread_cond_signal(&coder->threads[i].cond);
     529  		}
     530  	}
     531  
     532  	for (uint32_t i = 0; i < coder->threads_initialized; ++i)
     533  		mythread_join(coder->threads[i].thread_id);
     534  
     535  	lzma_free(coder->threads, allocator);
     536  	coder->threads_initialized = 0;
     537  	coder->threads = NULL;
     538  	coder->threads_free = NULL;
     539  
     540  	// The threads don't update these when they exit. Do it here.
     541  	coder->mem_in_use = 0;
     542  	coder->mem_cached = 0;
     543  
     544  	return;
     545  }
     546  
     547  
     548  static void
     549  threads_stop(struct lzma_stream_coder *coder)
     550  {
     551  	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
     552  		mythread_sync(coder->threads[i].mutex) {
     553  			// The state must be changed conditionally because
     554  			// THR_IDLE -> THR_STOP is not a valid state change.
     555  			if (coder->threads[i].state != THR_IDLE) {
     556  				coder->threads[i].state = THR_STOP;
     557  				mythread_cond_signal(&coder->threads[i].cond);
     558  			}
     559  		}
     560  	}
     561  
     562  	return;
     563  }
     564  
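           // Unlike threads_end() above, this does not terminate the threads:
           // a stopped worker moves itself back to THR_IDLE and onto the
           // threads_free stack via worker_stop(), keeping its allocations
           // cached so that later Blocks can reuse the thread.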
     565  
     566  /// Initialize a new worker_thread structure and create a new thread.
     567  static lzma_ret
     568  initialize_new_thread(struct lzma_stream_coder *coder,
     569  		const lzma_allocator *allocator)
     570  {
     571  	// Allocate the coder->threads array if needed. It's done here instead
     572  	// of when initializing the decoder because we don't need this if we
     573  	// use the direct mode (we may even free coder->threads in the middle
     574  	// of the file if we switch from threaded to direct mode).
     575  	if (coder->threads == NULL) {
     576  		coder->threads = lzma_alloc(
     577  			coder->threads_max * sizeof(struct worker_thread),
     578  			allocator);
     579  
     580  		if (coder->threads == NULL)
     581  			return LZMA_MEM_ERROR;
     582  	}
     583  
     584  	// Pick a free structure.
     585  	assert(coder->threads_initialized < coder->threads_max);
     586  	struct worker_thread *thr
     587  			= &coder->threads[coder->threads_initialized];
     588  
     589  	if (mythread_mutex_init(&thr->mutex))
     590  		goto error_mutex;
     591  
     592  	if (mythread_cond_init(&thr->cond))
     593  		goto error_cond;
     594  
     595  	thr->state = THR_IDLE;
     596  	thr->in = NULL;
     597  	thr->in_size = 0;
     598  	thr->allocator = allocator;
     599  	thr->coder = coder;
     600  	thr->outbuf = NULL;
     601  	thr->block_decoder = LZMA_NEXT_CODER_INIT;
     602  	thr->mem_filters = 0;
     603  
     604  	if (mythread_create(&thr->thread_id, worker_decoder, thr))
     605  		goto error_thread;
     606  
     607  	++coder->threads_initialized;
     608  	coder->thr = thr;
     609  
     610  	return LZMA_OK;
     611  
     612  error_thread:
     613  	mythread_cond_destroy(&thr->cond);
     614  
     615  error_cond:
     616  	mythread_mutex_destroy(&thr->mutex);
     617  
     618  error_mutex:
     619  	return LZMA_MEM_ERROR;
     620  }
     621  
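           // Note that coder->threads_initialized is incremented only after
           // mythread_create() has succeeded, so threads_end() will never try
           // to signal or join a thread that was never actually created.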
     622  
     623  static lzma_ret
     624  get_thread(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
     625  {
     626  	// If there is a free structure on the stack, use it.
     627  	mythread_sync(coder->mutex) {
     628  		if (coder->threads_free != NULL) {
     629  			coder->thr = coder->threads_free;
     630  			coder->threads_free = coder->threads_free->next;
     631  
     632  			// The thread is no longer in the cache so subtract
     633  			// it from the cached memory usage. Don't add it
     634  			// to mem_in_use though; the caller will handle it
     635  			// since it knows how much memory it will actually
     636  			// use (the filter chain might change).
     637  			coder->mem_cached -= coder->thr->mem_filters;
     638  		}
     639  	}
     640  
     641  	if (coder->thr == NULL) {
     642  		assert(coder->threads_initialized < coder->threads_max);
     643  
     644  		// Initialize a new thread.
     645  		return_if_error(initialize_new_thread(coder, allocator));
     646  	}
     647  
     648  	coder->thr->in_filled = 0;
     649  	coder->thr->in_pos = 0;
     650  	coder->thr->out_pos = 0;
     651  
     652  	coder->thr->progress_in = 0;
     653  	coder->thr->progress_out = 0;
     654  
     655  	coder->thr->partial_update = PARTIAL_DISABLED;
     656  
     657  	return LZMA_OK;
     658  }
     659  
     660  
     661  static lzma_ret
     662  read_output_and_wait(struct lzma_stream_coder *coder,
     663  		const lzma_allocator *allocator,
     664  		uint8_t *restrict out, size_t *restrict out_pos,
     665  		size_t out_size,
     666  		bool *input_is_possible,
     667  		bool waiting_allowed,
     668  		mythread_condtime *wait_abs, bool *has_blocked)
     669  {
     670  	lzma_ret ret = LZMA_OK;
     671  
     672  	mythread_sync(coder->mutex) {
     673  		do {
     674  			// Get as much output from the queue as is possible
     675  			// without blocking.
     676  			const size_t out_start = *out_pos;
     677  			do {
     678  				ret = lzma_outq_read(&coder->outq, allocator,
     679  						out, out_pos, out_size,
     680  						NULL, NULL);
     681  
     682  				// If a Block was finished, tell the worker
     683  				// thread of the next Block (if it is still
     684  				// running) to start telling the main thread
     685  				// when new output is available.
     686  				if (ret == LZMA_STREAM_END)
     687  					lzma_outq_enable_partial_output(
     688  						&coder->outq,
     689  						&worker_enable_partial_update);
     690  
      691  				// Loop while Blocks keep getting finished.
     692  				// It's important to loop around even if
     693  				// *out_pos == out_size because there could
     694  				// be an empty Block that will return
     695  				// LZMA_STREAM_END without needing any
     696  				// output space.
     697  			} while (ret == LZMA_STREAM_END);
     698  
     699  			// Check if lzma_outq_read reported an error from
     700  			// the Block decoder.
     701  			if (ret != LZMA_OK)
     702  				break;
     703  
     704  			// If the output buffer is now full but it wasn't full
     705  			// when this function was called, set out_was_filled.
     706  			// This way the next call to stream_decode_mt() knows
     707  			// that some output was produced and no output space
     708  			// remained in the previous call to stream_decode_mt().
     709  			if (*out_pos == out_size && *out_pos != out_start)
     710  				coder->out_was_filled = true;
     711  
     712  			// Check if any thread has indicated an error.
     713  			if (coder->thread_error != LZMA_OK) {
     714  				// If LZMA_FAIL_FAST was used, report errors
     715  				// from worker threads immediately.
     716  				if (coder->fail_fast) {
     717  					ret = coder->thread_error;
     718  					break;
     719  				}
     720  
     721  				// Otherwise set pending_error. The value we
     722  				// set here will not actually get used other
     723  				// than working as a flag that an error has
     724  				// occurred. This is because in SEQ_ERROR
     725  				// all output before the error will be read
     726  				// first by calling this function, and once we
     727  				// reach the location of the (first) error the
     728  				// error code from the above lzma_outq_read()
     729  				// will be returned to the application.
     730  				//
     731  				// Use LZMA_PROG_ERROR since the value should
     732  				// never leak to the application. It's
     733  				// possible that pending_error has already
     734  				// been set but that doesn't matter: if we get
     735  				// here, pending_error only works as a flag.
     736  				coder->pending_error = LZMA_PROG_ERROR;
     737  			}
     738  
     739  			// Check if decoding of the next Block can be started.
     740  			// The memusage of the active threads must be low
     741  			// enough, there must be a free buffer slot in the
     742  			// output queue, and there must be a free thread
     743  			// (that can be either created or an existing one
     744  			// reused).
     745  			//
     746  			// NOTE: This is checked after reading the output
     747  			// above because reading the output can free a slot in
     748  			// the output queue and also reduce active memusage.
     749  			//
     750  			// NOTE: If output queue is empty, then input will
     751  			// always be possible.
     752  			if (input_is_possible != NULL
     753  					&& coder->memlimit_threading
     754  						- coder->mem_in_use
     755  						- coder->outq.mem_in_use
     756  						>= coder->mem_next_block
     757  					&& lzma_outq_has_buf(&coder->outq)
     758  					&& (coder->threads_initialized
     759  							< coder->threads_max
     760  						|| coder->threads_free
     761  							!= NULL)) {
     762  				*input_is_possible = true;
     763  				break;
     764  			}
     765  
     766  			// If the caller doesn't want us to block, return now.
     767  			if (!waiting_allowed)
     768  				break;
     769  
     770  			// This check is needed only when input_is_possible
     771  			// is NULL. We must return if we aren't waiting for
     772  			// input to become possible and there is no more
     773  			// output coming from the queue.
     774  			if (lzma_outq_is_empty(&coder->outq)) {
     775  				assert(input_is_possible == NULL);
     776  				break;
     777  			}
     778  
     779  			// If there is more data available from the queue,
     780  			// our out buffer must be full and we need to return
     781  			// so that the application can provide more output
     782  			// space.
     783  			//
     784  			// NOTE: In general lzma_outq_is_readable() can return
     785  			// true also when there are no more bytes available.
     786  			// This can happen when a Block has finished without
     787  			// providing any new output. We know that this is not
     788  			// the case because in the beginning of this loop we
     789  			// tried to read as much as possible even when we had
     790  			// no output space left and the mutex has been locked
     791  			// all the time (so worker threads cannot have changed
     792  			// anything). Thus there must be actual pending output
     793  			// in the queue.
     794  			if (lzma_outq_is_readable(&coder->outq)) {
     795  				assert(*out_pos == out_size);
     796  				break;
     797  			}
     798  
     799  			// If the application stops providing more input
     800  			// in the middle of a Block, there will eventually
     801  			// be one worker thread left that is stuck waiting for
     802  			// more input (that might never arrive) and a matching
     803  			// outbuf which the worker thread cannot finish due
     804  			// to lack of input. We must detect this situation,
     805  			// otherwise we would end up waiting indefinitely
     806  			// (if no timeout is in use) or keep returning
     807  			// LZMA_TIMED_OUT while making no progress. Thus, the
     808  			// application would never get LZMA_BUF_ERROR from
     809  			// lzma_code() which would tell the application that
     810  			// no more progress is possible. No LZMA_BUF_ERROR
     811  			// means that, for example, truncated .xz files could
     812  			// cause an infinite loop.
     813  			//
     814  			// A worker thread doing partial updates will
     815  			// store not only the output position in outbuf->pos
     816  			// but also the matching input position in
     817  			// outbuf->decoder_in_pos. Here we check if that
     818  			// input position matches the amount of input that
     819  			// the worker thread has been given (in_filled).
     820  			// If so, we must return and not wait as no more
     821  			// output will be coming without first getting more
     822  			// input to the worker thread. If the application
     823  			// keeps calling lzma_code() without providing more
     824  			// input, it will eventually get LZMA_BUF_ERROR.
     825  			//
     826  			// NOTE: We can read partial_update and in_filled
     827  			// without thr->mutex as only the main thread
     828  			// modifies these variables. decoder_in_pos requires
     829  			// coder->mutex which we are already holding.
     830  			if (coder->thr != NULL && coder->thr->partial_update
     831  					!= PARTIAL_DISABLED) {
     832  				// There is exactly one outbuf in the queue.
     833  				assert(coder->thr->outbuf == coder->outq.head);
     834  				assert(coder->thr->outbuf == coder->outq.tail);
     835  
     836  				if (coder->thr->outbuf->decoder_in_pos
     837  						== coder->thr->in_filled)
     838  					break;
     839  			}
     840  
     841  			// Wait for input or output to become possible.
     842  			if (coder->timeout != 0) {
     843  				// See the comment in stream_encoder_mt.c
     844  				// about why mythread_condtime_set() is used
     845  				// like this.
     846  				//
     847  				// FIXME?
     848  				// In contrast to the encoder, this calls
     849  				// _condtime_set while the mutex is locked.
     850  				if (!*has_blocked) {
     851  					*has_blocked = true;
     852  					mythread_condtime_set(wait_abs,
     853  							&coder->cond,
     854  							coder->timeout);
     855  				}
     856  
     857  				if (mythread_cond_timedwait(&coder->cond,
     858  						&coder->mutex,
     859  						wait_abs) != 0) {
     860  					ret = LZMA_TIMED_OUT;
     861  					break;
     862  				}
     863  			} else {
     864  				mythread_cond_wait(&coder->cond,
     865  						&coder->mutex);
     866  			}
     867  		} while (ret == LZMA_OK);
     868  	}
     869  
     870  	// If we are returning an error, then the application cannot get
     871  	// more output from us and thus keeping the threads running is
      872  	// useless and a waste of CPU time.
     873  	if (ret != LZMA_OK && ret != LZMA_TIMED_OUT)
     874  		threads_stop(coder);
     875  
     876  	return ret;
     877  }
     878  
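           // A short summary of the contract above: LZMA_OK means the caller may
           // continue (with *input_is_possible telling whether a new Block can
           // be started, when that pointer is non-NULL), LZMA_TIMED_OUT means
           // coder->timeout expired, and any other value is an error; for errors
           // the worker threads have already been told to stop.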
     879  
     880  static lzma_ret
     881  decode_block_header(struct lzma_stream_coder *coder,
     882  		const lzma_allocator *allocator, const uint8_t *restrict in,
     883  		size_t *restrict in_pos, size_t in_size)
     884  {
     885  	if (*in_pos >= in_size)
     886  		return LZMA_OK;
     887  
     888  	if (coder->pos == 0) {
     889  		// Detect if it's Index.
     890  		if (in[*in_pos] == INDEX_INDICATOR)
     891  			return LZMA_INDEX_DETECTED;
     892  
     893  		// Calculate the size of the Block Header. Note that
     894  		// Block Header decoder wants to see this byte too
     895  		// so don't advance *in_pos.
     896  		coder->block_options.header_size
     897  				= lzma_block_header_size_decode(
     898  					in[*in_pos]);
     899  	}
     900  
     901  	// Copy the Block Header to the internal buffer.
     902  	lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
     903  			coder->block_options.header_size);
     904  
     905  	// Return if we didn't get the whole Block Header yet.
     906  	if (coder->pos < coder->block_options.header_size)
     907  		return LZMA_OK;
     908  
     909  	coder->pos = 0;
     910  
     911  	// Version 1 is needed to support the .ignore_check option.
     912  	coder->block_options.version = 1;
     913  
     914  	// Block Header decoder will initialize all members of this array
     915  	// so we don't need to do it here.
     916  	coder->block_options.filters = coder->filters;
     917  
     918  	// Decode the Block Header.
     919  	return_if_error(lzma_block_header_decode(&coder->block_options,
     920  			allocator, coder->buffer));
     921  
     922  	// If LZMA_IGNORE_CHECK was used, this flag needs to be set.
     923  	// It has to be set after lzma_block_header_decode() because
     924  	// it always resets this to false.
     925  	coder->block_options.ignore_check = coder->ignore_check;
     926  
     927  	// coder->block_options is ready now.
     928  	return LZMA_STREAM_END;
     929  }
     930  
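           // Return values of decode_block_header() above: LZMA_OK means more
           // input is needed to finish the Block Header, LZMA_INDEX_DETECTED
           // means the byte at *in_pos was the Index Indicator, LZMA_STREAM_END
           // means coder->block_options is now fully decoded, and anything else
           // is an error from lzma_block_header_decode().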
     931  
     932  /// Get the size of the Compressed Data + Block Padding + Check.
     933  static size_t
     934  comp_blk_size(const struct lzma_stream_coder *coder)
     935  {
     936  	return vli_ceil4(coder->block_options.compressed_size)
     937  			+ lzma_check_size(coder->stream_flags.check);
     938  }
     939  
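           // An illustrative example (not from the original sources): with
           // Compressed Size = 1234 and a CRC64 check, vli_ceil4() rounds 1234
           // up to 1236 to cover Block Padding, and lzma_check_size() adds the
           // 8-byte Check field, so comp_blk_size() returns 1244.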
     940  
     941  /// Returns true if the size (compressed or uncompressed) is such that
     942  /// threaded decompression cannot be used. Sizes that are too big compared
     943  /// to SIZE_MAX must be rejected to avoid integer overflows and truncations
     944  /// when lzma_vli is assigned to a size_t.
     945  static bool
     946  is_direct_mode_needed(lzma_vli size)
     947  {
     948  	return size == LZMA_VLI_UNKNOWN || size > SIZE_MAX / 3;
     949  }
     950  
     951  
     952  static lzma_ret
     953  stream_decoder_reset(struct lzma_stream_coder *coder,
     954  		const lzma_allocator *allocator)
     955  {
     956  	// Initialize the Index hash used to verify the Index.
     957  	coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
     958  	if (coder->index_hash == NULL)
     959  		return LZMA_MEM_ERROR;
     960  
     961  	// Reset the rest of the variables.
     962  	coder->sequence = SEQ_STREAM_HEADER;
     963  	coder->pos = 0;
     964  
     965  	return LZMA_OK;
     966  }
     967  
     968  
     969  static lzma_ret
     970  stream_decode_mt(void *coder_ptr, const lzma_allocator *allocator,
     971  		 const uint8_t *restrict in, size_t *restrict in_pos,
     972  		 size_t in_size,
     973  		 uint8_t *restrict out, size_t *restrict out_pos,
     974  		 size_t out_size, lzma_action action)
     975  {
     976  	struct lzma_stream_coder *coder = coder_ptr;
     977  
     978  	mythread_condtime wait_abs;
     979  	bool has_blocked = false;
     980  
     981  	// Determine if in SEQ_BLOCK_HEADER and SEQ_BLOCK_THR_RUN we should
     982  	// tell read_output_and_wait() to wait until it can fill the output
     983  	// buffer (or a timeout occurs). Two conditions must be met:
     984  	//
     985  	// (1) If the caller provided no new input. The reason for this
     986  	//     can be, for example, the end of the file or that there is
     987  	//     a pause in the input stream and more input is available
     988  	//     a little later. In this situation we should wait for output
     989  	//     because otherwise we would end up in a busy-waiting loop where
     990  	//     we make no progress and the application just calls us again
     991  	//     without providing any new input. This would then result in
     992  	//     LZMA_BUF_ERROR even though more output would be available
     993  	//     once the worker threads decode more data.
     994  	//
     995  	// (2) Even if (1) is true, we will not wait if the previous call to
     996  	//     this function managed to produce some output and the output
     997  	//     buffer became full. This is for compatibility with applications
     998  	//     that call lzma_code() in such a way that new input is provided
     999  	//     only when the output buffer didn't become full. Without this
    1000  	//     trick such applications would have bad performance (bad
    1001  	//     parallelization due to decoder not getting input fast enough).
    1002  	//
    1003  	//     NOTE: Such loops might require that timeout is disabled (0)
    1004  	//     if they assume that output-not-full implies that all input has
    1005  	//     been consumed. If and only if timeout is enabled, we may return
    1006  	//     when output isn't full *and* not all input has been consumed.
    1007  	//
    1008  	// However, if LZMA_FINISH is used, the above is ignored and we always
    1009  	// wait (timeout can still cause us to return) because we know that
    1010  	// we won't get any more input. This matters if the input file is
    1011  	// truncated and we are doing single-shot decoding, that is,
    1012  	// timeout = 0 and LZMA_FINISH is used on the first call to
    1013  	// lzma_code() and the output buffer is known to be big enough
    1014  	// to hold all uncompressed data:
    1015  	//
    1016  	//   - If LZMA_FINISH wasn't handled specially, we could return
    1017  	//     LZMA_OK before providing all output that is possible with the
    1018  	//     truncated input. The rest would be available if lzma_code() was
    1019  	//     called again but then it's not single-shot decoding anymore.
    1020  	//
    1021  	//   - By handling LZMA_FINISH specially here, the first call will
    1022  	//     produce all the output, matching the behavior of the
    1023  	//     single-threaded decoder.
    1024  	//
    1025  	// So it's a very specific corner case but also easy to avoid. Note
    1026  	// that this special handling of LZMA_FINISH has no effect for
    1027  	// single-shot decoding when the input file is valid (not truncated);
    1028  	// premature LZMA_OK wouldn't be possible as long as timeout = 0.
    1029  	const bool waiting_allowed = action == LZMA_FINISH
    1030  			|| (*in_pos == in_size && !coder->out_was_filled);
    1031  	coder->out_was_filled = false;
    1032  
    1033  	while (true)
    1034  	switch (coder->sequence) {
    1035  	case SEQ_STREAM_HEADER: {
    1036  		// Copy the Stream Header to the internal buffer.
    1037  		const size_t in_old = *in_pos;
    1038  		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
    1039  				LZMA_STREAM_HEADER_SIZE);
    1040  		coder->progress_in += *in_pos - in_old;
    1041  
    1042  		// Return if we didn't get the whole Stream Header yet.
    1043  		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
    1044  			return LZMA_OK;
    1045  
    1046  		coder->pos = 0;
    1047  
    1048  		// Decode the Stream Header.
    1049  		const lzma_ret ret = lzma_stream_header_decode(
    1050  				&coder->stream_flags, coder->buffer);
    1051  		if (ret != LZMA_OK)
    1052  			return ret == LZMA_FORMAT_ERROR && !coder->first_stream
    1053  					? LZMA_DATA_ERROR : ret;
    1054  
    1055  		// If we are decoding concatenated Streams, and the later
    1056  		// Streams have invalid Header Magic Bytes, we give
    1057  		// LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
    1058  		coder->first_stream = false;
    1059  
    1060  		// Copy the type of the Check so that Block Header and Block
    1061  		// decoders see it.
    1062  		coder->block_options.check = coder->stream_flags.check;
    1063  
    1064  		// Even if we return LZMA_*_CHECK below, we want
    1065  		// to continue from Block Header decoding.
    1066  		coder->sequence = SEQ_BLOCK_HEADER;
    1067  
    1068  		// Detect if there's no integrity check or if it is
    1069  		// unsupported if those were requested by the application.
    1070  		if (coder->tell_no_check && coder->stream_flags.check
    1071  				== LZMA_CHECK_NONE)
    1072  			return LZMA_NO_CHECK;
    1073  
    1074  		if (coder->tell_unsupported_check
    1075  				&& !lzma_check_is_supported(
    1076  					coder->stream_flags.check))
    1077  			return LZMA_UNSUPPORTED_CHECK;
    1078  
    1079  		if (coder->tell_any_check)
    1080  			return LZMA_GET_CHECK;
    1081  	}
    1082  
    1083  	// Fall through
    1084  
    1085  	case SEQ_BLOCK_HEADER: {
    1086  		const size_t in_old = *in_pos;
    1087  		const lzma_ret ret = decode_block_header(coder, allocator,
    1088  				in, in_pos, in_size);
    1089  		coder->progress_in += *in_pos - in_old;
    1090  
    1091  		if (ret == LZMA_OK) {
    1092  			// We didn't decode the whole Block Header yet.
    1093  			//
    1094  			// Read output from the queue before returning. This
    1095  			// is important because it is possible that the
    1096  			// application doesn't have any new input available
    1097  			// immediately. If we didn't try to copy output from
    1098  			// the output queue here, lzma_code() could end up
    1099  			// returning LZMA_BUF_ERROR even though queued output
    1100  			// is available.
    1101  			//
    1102  			// If the lzma_code() call provided at least one input
    1103  			// byte, only copy as much data from the output queue
    1104  			// as is available immediately. This way the
    1105  			// application will be able to provide more input
    1106  			// without a delay.
    1107  			//
    1108  			// On the other hand, if lzma_code() was called with
    1109  			// an empty input buffer(*), treat it specially: try
    1110  			// to fill the output buffer even if it requires
    1111  			// waiting for the worker threads to provide output
    1112  			// (timeout, if specified, can still cause us to
    1113  			// return).
    1114  			//
    1115  			//   - This way the application will be able to get all
    1116  			//     data that can be decoded from the input provided
    1117  			//     so far.
    1118  			//
    1119  			//   - We avoid both premature LZMA_BUF_ERROR and
    1120  			//     busy-waiting where the application repeatedly
    1121  			//     calls lzma_code() which immediately returns
    1122  			//     LZMA_OK without providing new data.
    1123  			//
     1124  			//   - If the queue becomes empty, we won't wait for
    1125  			//     anything and will return LZMA_OK immediately
    1126  			//     (coder->timeout is completely ignored).
    1127  			//
    1128  			// (*) See the comment at the beginning of this
     1129  			//     function for how waiting_allowed is determined
    1130  			//     and why there is an exception to the rule
    1131  			//     of "called with an empty input buffer".
    1132  			assert(*in_pos == in_size);
    1133  
    1134  			// If LZMA_FINISH was used we know that we won't get
    1135  			// more input, so the file must be truncated if we
    1136  			// get here. If worker threads don't detect any
    1137  			// errors, eventually there will be no more output
    1138  			// while we keep returning LZMA_OK which gets
    1139  			// converted to LZMA_BUF_ERROR in lzma_code().
    1140  			//
    1141  			// If fail-fast is enabled then we will return
    1142  			// immediately using LZMA_DATA_ERROR instead of
    1143  			// LZMA_OK or LZMA_BUF_ERROR. Rationale for the
    1144  			// error code:
    1145  			//
    1146  			//   - Worker threads may have a large amount of
    1147  			//     not-yet-decoded input data and we don't
    1148  			//     know for sure if all data is valid. Bad
    1149  			//     data there would result in LZMA_DATA_ERROR
    1150  			//     when fail-fast isn't used.
    1151  			//
    1152  			//   - Immediate LZMA_BUF_ERROR would be a bit weird
    1153  			//     considering the older liblzma code. lzma_code()
    1154  			//     even has an assertion to prevent coders from
    1155  			//     returning LZMA_BUF_ERROR directly.
    1156  			//
    1157  			// The downside of this is that with fail-fast apps
    1158  			// cannot always distinguish between corrupt and
    1159  			// truncated files.
    1160  			if (action == LZMA_FINISH && coder->fail_fast) {
    1161  				// We won't produce any more output. Stop
    1162  				// the unfinished worker threads so they
    1163  				// won't waste CPU time.
    1164  				threads_stop(coder);
    1165  				return LZMA_DATA_ERROR;
    1166  			}
    1167  
    1168  			// read_output_and_wait() will call threads_stop()
    1169  			// if needed so with that we can use return_if_error.
    1170  			return_if_error(read_output_and_wait(coder, allocator,
    1171  				out, out_pos, out_size,
    1172  				NULL, waiting_allowed,
    1173  				&wait_abs, &has_blocked));
    1174  
    1175  			if (coder->pending_error != LZMA_OK) {
    1176  				coder->sequence = SEQ_ERROR;
    1177  				break;
    1178  			}
    1179  
    1180  			return LZMA_OK;
    1181  		}
    1182  
    1183  		if (ret == LZMA_INDEX_DETECTED) {
    1184  			coder->sequence = SEQ_INDEX_WAIT_OUTPUT;
    1185  			break;
    1186  		}
    1187  
    1188  		// See if an error occurred.
    1189  		if (ret != LZMA_STREAM_END) {
    1190  			// NOTE: Here and in all other places where
    1191  			// pending_error is set, it may overwrite the value
    1192  			// (LZMA_PROG_ERROR) set by read_output_and_wait().
    1193  			// That function might overwrite value set here too.
    1194  			// These are fine because when read_output_and_wait()
    1195  			// sets pending_error, it actually works as a flag
    1196  			// variable only ("some error has occurred") and the
    1197  			// actual value of pending_error is not used in
    1198  			// SEQ_ERROR. In such cases SEQ_ERROR will eventually
    1199  			// get the correct error code from the return value of
    1200  			// a later read_output_and_wait() call.
    1201  			coder->pending_error = ret;
    1202  			coder->sequence = SEQ_ERROR;
    1203  			break;
    1204  		}
    1205  
    1206  		// Calculate the memory usage of the filters / Block decoder.
    1207  		coder->mem_next_filters = lzma_raw_decoder_memusage(
    1208  				coder->filters);
    1209  
    1210  		if (coder->mem_next_filters == UINT64_MAX) {
    1211  			// One or more unknown Filter IDs.
    1212  			coder->pending_error = LZMA_OPTIONS_ERROR;
    1213  			coder->sequence = SEQ_ERROR;
    1214  			break;
    1215  		}
    1216  
    1217  		coder->sequence = SEQ_BLOCK_INIT;
    1218  	}
    1219  
    1220  	// Fall through
    1221  
    1222  	case SEQ_BLOCK_INIT: {
    1223  		// Check if decoding is possible at all with the current
    1224  		// memlimit_stop which we must never exceed.
    1225  		//
    1226  		// This needs to be the first thing in SEQ_BLOCK_INIT
    1227  		// to make it possible to restart decoding after increasing
    1228  		// memlimit_stop with lzma_memlimit_set().
    1229  		if (coder->mem_next_filters > coder->memlimit_stop) {
    1230  			// Flush pending output before returning
    1231  			// LZMA_MEMLIMIT_ERROR. If the application doesn't
    1232  			// want to increase the limit, at least it will get
    1233  			// all the output possible so far.
    1234  			return_if_error(read_output_and_wait(coder, allocator,
    1235  					out, out_pos, out_size,
    1236  					NULL, true, &wait_abs, &has_blocked));
    1237  
    1238  			if (!lzma_outq_is_empty(&coder->outq))
    1239  				return LZMA_OK;
    1240  
    1241  			return LZMA_MEMLIMIT_ERROR;
    1242  		}
    1243  
    1244  		// Check if the size information is available in Block Header.
    1245  		// If it is, check if the sizes are small enough that we don't
    1246  		// need to worry *too* much about integer overflows later in
    1247  		// the code. If these conditions are not met, we must use the
    1248  		// single-threaded direct mode.
    1249  		if (is_direct_mode_needed(coder->block_options.compressed_size)
    1250  				|| is_direct_mode_needed(
    1251  				coder->block_options.uncompressed_size)) {
    1252  			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
    1253  			break;
    1254  		}
    1255  
    1256  		// Calculate the amount of memory needed for the input and
    1257  		// output buffers in threaded mode.
    1258  		//
    1259  		// These cannot overflow because we already checked that
    1260  		// the sizes are small enough using is_direct_mode_needed().
    1261  		coder->mem_next_in = comp_blk_size(coder);
    1262  		const uint64_t mem_buffers = coder->mem_next_in
    1263  				+ lzma_outq_outbuf_memusage(
    1264  				coder->block_options.uncompressed_size);
    1265  
    1266  		// Add the amount needed by the filters.
    1267  		// Avoid integer overflows.
    1268  		if (UINT64_MAX - mem_buffers < coder->mem_next_filters) {
    1269  			// Use direct mode if the memusage would overflow.
    1270  			// This is a theoretical case that shouldn't happen
    1271  			// in practice unless the input file is weird (broken
    1272  			// or malicious).
    1273  			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
    1274  			break;
    1275  		}
    1276  
    1277  		// Amount of memory needed to decode this Block in
    1278  		// threaded mode:
    1279  		coder->mem_next_block = coder->mem_next_filters + mem_buffers;
    1280  
    1281  		// If this alone would exceed memlimit_threading, then we must
    1282  		// use the single-threaded direct mode.
    1283  		if (coder->mem_next_block > coder->memlimit_threading) {
    1284  			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
    1285  			break;
    1286  		}
    1287  
    1288  		// Use the threaded mode. Free the direct mode decoder in
    1289  		// case it has been initialized.
    1290  		lzma_next_end(&coder->block_decoder, allocator);
    1291  		coder->mem_direct_mode = 0;
    1292  
    1293  		// Since we already know what the sizes are supposed to be,
    1294  		// we can already add them to the Index hash. The Block
    1295  		// decoder will verify the values while decoding.
    1296  		const lzma_ret ret = lzma_index_hash_append(coder->index_hash,
    1297  				lzma_block_unpadded_size(
    1298  					&coder->block_options),
    1299  				coder->block_options.uncompressed_size);
    1300  		if (ret != LZMA_OK) {
    1301  			coder->pending_error = ret;
    1302  			coder->sequence = SEQ_ERROR;
    1303  			break;
    1304  		}
    1305  
    1306  		coder->sequence = SEQ_BLOCK_THR_INIT;
    1307  	}
    1308  
    1309  	// Fall through
    1310  
    1311  	case SEQ_BLOCK_THR_INIT: {
     1312  		// We need to wait for multiple conditions to become true
    1313  		// until we can initialize the Block decoder and let a worker
    1314  		// thread decode it:
    1315  		//
    1316  		//   - Wait for the memory usage of the active threads to drop
    1317  		//     so that starting the decoding of this Block won't make
    1318  		//     us go over memlimit_threading.
    1319  		//
    1320  		//   - Wait for at least one free output queue slot.
    1321  		//
    1322  		//   - Wait for a free worker thread.
    1323  		//
    1324  		// While we wait, we must copy decompressed data to the out
    1325  		// buffer and catch possible decoder errors.
    1326  		//
    1327  		// read_output_and_wait() does all the above.
    1328  		bool block_can_start = false;
    1329  
    1330  		return_if_error(read_output_and_wait(coder, allocator,
    1331  				out, out_pos, out_size,
    1332  				&block_can_start, true,
    1333  				&wait_abs, &has_blocked));
    1334  
    1335  		if (coder->pending_error != LZMA_OK) {
    1336  			coder->sequence = SEQ_ERROR;
    1337  			break;
    1338  		}
    1339  
    1340  		if (!block_can_start) {
    1341  			// It's not a timeout because return_if_error handles
    1342  			// it already. Output queue cannot be empty either
    1343  			// because in that case block_can_start would have
    1344  			// been true. Thus the output buffer must be full and
    1345  			// the queue isn't empty.
    1346  			assert(*out_pos == out_size);
    1347  			assert(!lzma_outq_is_empty(&coder->outq));
    1348  			return LZMA_OK;
    1349  		}
    1350  
    1351  		// We know that we can start decoding this Block without
    1352  		// exceeding memlimit_threading. However, to stay below
    1353  		// memlimit_threading may require freeing some of the
    1354  		// cached memory.
    1355  		//
    1356  		// Get a local copy of variables that require locking the
    1357  		// mutex. It is fine if the worker threads modify the real
    1358  		// values after we read these as those changes can only be
    1359  		// towards more favorable conditions (less memory in use,
    1360  		// more in cache).
    1361  		//
    1362  		// These are initialized to silence warnings.
    1363  		uint64_t mem_in_use = 0;
    1364  		uint64_t mem_cached = 0;
    1365  		struct worker_thread *thr = NULL;
    1366  
    1367  		mythread_sync(coder->mutex) {
    1368  			mem_in_use = coder->mem_in_use;
    1369  			mem_cached = coder->mem_cached;
    1370  			thr = coder->threads_free;
    1371  		}
    1372  
    1373  		// The maximum amount of memory that can be held by other
    1374  		// threads and cached buffers while allowing us to start
    1375  		// decoding the next Block.
    1376  		const uint64_t mem_max = coder->memlimit_threading
    1377  				- coder->mem_next_block;
    1378  
    1379  		// If the existing allocations are so large that starting
    1380  		// to decode this Block might exceed memlimit_threading,
    1381  		// try to free memory from the output queue cache first.
    1382  		//
    1383  		// NOTE: This math assumes the worst case. It's possible
    1384  		// that the limit wouldn't be exceeded if the existing cached
    1385  		// allocations are reused.
    1386  		if (mem_in_use + mem_cached + coder->outq.mem_allocated
    1387  				> mem_max) {
    1388  			// Clear the outq cache, but leave one buffer in
    1389  			// the cache if its size is correct. That way we
    1390  			// don't free and almost immediately reallocate
    1391  			// an identical buffer.
    1392  			lzma_outq_clear_cache2(&coder->outq, allocator,
    1393  				coder->block_options.uncompressed_size);
    1394  		}
    1395  
    1396  		// If there is at least one worker_thread in the cache and
    1397  		// the existing allocations are so large that starting to
    1398  		// decode this Block might exceed memlimit_threading, free
    1399  		// memory by freeing cached Block decoders.
    1400  		//
    1401  		// NOTE: The comparison is different here than above.
    1402  		// Here we don't care about cached buffers in outq anymore
    1403  		// and only look at memory actually in use. This is because
    1404  		// if there is something in outq cache, it's a single buffer
    1405  		// that can be used as is. We ensured this in the above
    1406  		// if-block.
    1407  		uint64_t mem_freed = 0;
    1408  		if (thr != NULL && mem_in_use + mem_cached
    1409  				+ coder->outq.mem_in_use > mem_max) {
    1410  			// Don't free the first Block decoder if its memory
    1411  			// usage isn't greater than what this Block will need.
    1412  			// Typically the same filter chain is used for all
    1413  			// Blocks so this way the allocations can be reused
    1414  			// when get_thread() picks the first worker_thread
    1415  			// from the cache.
    1416  			if (thr->mem_filters <= coder->mem_next_filters)
    1417  				thr = thr->next;
    1418  
    1419  			while (thr != NULL) {
    1420  				lzma_next_end(&thr->block_decoder, allocator);
    1421  				mem_freed += thr->mem_filters;
    1422  				thr->mem_filters = 0;
    1423  				thr = thr->next;
    1424  			}
    1425  		}
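          		// Illustration with hypothetical numbers (an editor's
          		// sketch, not from the original source): with
          		// memlimit_threading = 256 MiB and mem_next_block =
          		// 96 MiB, mem_max is 160 MiB. If mem_in_use = 100 MiB,
          		// mem_cached = 40 MiB and outq.mem_allocated = 50 MiB,
          		// then 190 MiB > 160 MiB and the outq cache was cleared
          		// above. If outq.mem_in_use is then, say, 30 MiB,
          		// 170 MiB > 160 MiB still holds, so the cached Block
          		// decoders were freed too (except possibly the first one).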
    1426  
    1427  		// Update the memory usage counters. Note that coder->mem_*
    1428  		// may have changed since we read them so we must subtract
    1429  		// or add the changes.
    1430  		mythread_sync(coder->mutex) {
    1431  			coder->mem_cached -= mem_freed;
    1432  
    1433  			// Memory needed for the filters and the input buffer.
    1434  			// The output queue takes care of its own counter so
    1435  			// we don't touch it here.
    1436  			//
    1437  			// NOTE: After this, coder->mem_in_use +
    1438  			// coder->mem_cached might count the same thing twice.
    1439  			// If so, this will get corrected in get_thread() when
    1440  			// a worker_thread is picked from coder->threads_free
    1441  			// and its memory usage is subtracted from mem_cached.
    1442  			coder->mem_in_use += coder->mem_next_in
    1443  					+ coder->mem_next_filters;
    1444  		}
    1445  
    1446  		// Allocate memory for the output buffer in the output queue.
    1447  		lzma_ret ret = lzma_outq_prealloc_buf(
    1448  				&coder->outq, allocator,
    1449  				coder->block_options.uncompressed_size);
    1450  		if (ret != LZMA_OK) {
    1451  			threads_stop(coder);
    1452  			return ret;
    1453  		}
    1454  
    1455  		// Set up coder->thr.
    1456  		ret = get_thread(coder, allocator);
    1457  		if (ret != LZMA_OK) {
    1458  			threads_stop(coder);
    1459  			return ret;
    1460  		}
    1461  
    1462  		// The new Block decoder memory usage is already counted in
    1463  		// coder->mem_in_use. Store it in the thread too.
    1464  		coder->thr->mem_filters = coder->mem_next_filters;
    1465  
    1466  		// Initialize the Block decoder.
    1467  		coder->thr->block_options = coder->block_options;
    1468  		ret = lzma_block_decoder_init(
    1469  					&coder->thr->block_decoder, allocator,
    1470  					&coder->thr->block_options);
    1471  
    1472  		// Free the allocated filter options since they are needed
    1473  		// only to initialize the Block decoder.
    1474  		lzma_filters_free(coder->filters, allocator);
    1475  		coder->thr->block_options.filters = NULL;
    1476  
    1477  		// Check if the Block decoder initialization succeeded.
    1478  		// (Its memory usage was already accounted for above.)
    1479  		if (ret != LZMA_OK) {
    1480  			coder->pending_error = ret;
    1481  			coder->sequence = SEQ_ERROR;
    1482  			break;
    1483  		}
    1484  
    1485  		// Allocate the input buffer.
    1486  		coder->thr->in_size = coder->mem_next_in;
    1487  		coder->thr->in = lzma_alloc(coder->thr->in_size, allocator);
    1488  		if (coder->thr->in == NULL) {
    1489  			threads_stop(coder);
    1490  			return LZMA_MEM_ERROR;
    1491  		}
    1492  
    1493  		// Get the preallocated output buffer.
    1494  		coder->thr->outbuf = lzma_outq_get_buf(
    1495  				&coder->outq, coder->thr);
    1496  
    1497  		// Start the decoder.
    1498  		mythread_sync(coder->thr->mutex) {
    1499  			assert(coder->thr->state == THR_IDLE);
    1500  			coder->thr->state = THR_RUN;
    1501  			mythread_cond_signal(&coder->thr->cond);
    1502  		}
    1503  
    1504  		// Enable output from the thread that holds the oldest output
    1505  		// buffer in the output queue (if such a thread exists).
    1506  		mythread_sync(coder->mutex) {
    1507  			lzma_outq_enable_partial_output(&coder->outq,
    1508  					&worker_enable_partial_update);
    1509  		}
    1510  
    1511  		coder->sequence = SEQ_BLOCK_THR_RUN;
    1512  	}
    1513  
    1514  	// Fall through
    1515  
    1516  	case SEQ_BLOCK_THR_RUN: {
    1517  		if (action == LZMA_FINISH && coder->fail_fast) {
    1518  			// We know that we won't get more input and that
    1519  			// the caller wants fail-fast behavior. If we see
    1520  			// that we don't have enough input to finish this
    1521  			// Block, return LZMA_DATA_ERROR immediately.
    1522  			// See SEQ_BLOCK_HEADER for the error code rationale.
    1523  			const size_t in_avail = in_size - *in_pos;
    1524  			const size_t in_needed = coder->thr->in_size
    1525  					- coder->thr->in_filled;
    1526  			if (in_avail < in_needed) {
    1527  				threads_stop(coder);
    1528  				return LZMA_DATA_ERROR;
    1529  			}
    1530  		}
    1531  
    1532  		// Copy input to the worker thread.
    1533  		size_t cur_in_filled = coder->thr->in_filled;
    1534  		lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
    1535  				&cur_in_filled, coder->thr->in_size);
    1536  
    1537  		// Tell the thread how much we copied.
    1538  		mythread_sync(coder->thr->mutex) {
    1539  			coder->thr->in_filled = cur_in_filled;
    1540  
    1541  			// NOTE: Most of the time we are copying input faster
    1542  			// than the thread can decode, so calling
    1543  			// mythread_cond_signal() is usually useless, but
    1544  			// we cannot make it conditional because thr->in_pos
    1545  			// is updated without a mutex. The overhead should
    1546  			// be negligible anyway.
    1547  			mythread_cond_signal(&coder->thr->cond);
    1548  		}
    1549  
    1550  		// Read output from the output queue. Just like in
    1551  		// SEQ_BLOCK_HEADER, we wait to fill the output buffer
    1552  		// only if waiting_allowed was set to true at the beginning
    1553  		// of this function (see the comment there).
    1554  		return_if_error(read_output_and_wait(coder, allocator,
    1555  				out, out_pos, out_size,
    1556  				NULL, waiting_allowed,
    1557  				&wait_abs, &has_blocked));
    1558  
    1559  		if (coder->pending_error != LZMA_OK) {
    1560  			coder->sequence = SEQ_ERROR;
    1561  			break;
    1562  		}
    1563  
    1564  		// Return if the input didn't contain the whole Block.
    1565  		if (coder->thr->in_filled < coder->thr->in_size) {
    1566  			assert(*in_pos == in_size);
    1567  			return LZMA_OK;
    1568  		}
    1569  
    1570  		// The whole Block has been copied to the thread-specific
    1571  		// buffer. Continue from the next Block Header or Index.
    1572  		coder->thr = NULL;
    1573  		coder->sequence = SEQ_BLOCK_HEADER;
    1574  		break;
    1575  	}
    1576  
    1577  	case SEQ_BLOCK_DIRECT_INIT: {
    1578  		// Wait for the threads to finish and for all decoded data
    1579  		// to be copied to the output. That is, wait until the
    1580  		// output queue becomes empty.
    1581  		//
    1582  		// NOTE: No need to check for coder->pending_error as
    1583  		// we aren't consuming any input until the queue is empty
    1584  		// and if there is a pending error, read_output_and_wait()
    1585  		// will eventually return it before the queue is empty.
    1586  		return_if_error(read_output_and_wait(coder, allocator,
    1587  				out, out_pos, out_size,
    1588  				NULL, true, &wait_abs, &has_blocked));
    1589  		if (!lzma_outq_is_empty(&coder->outq))
    1590  			return LZMA_OK;
    1591  
    1592  		// Free the cached output buffers.
    1593  		lzma_outq_clear_cache(&coder->outq, allocator);
    1594  
    1595  		// Get rid of the worker threads, including the coder->threads
    1596  		// array.
    1597  		threads_end(coder, allocator);
    1598  
    1599  		// Initialize the Block decoder.
    1600  		const lzma_ret ret = lzma_block_decoder_init(
    1601  				&coder->block_decoder, allocator,
    1602  				&coder->block_options);
    1603  
    1604  		// Free the allocated filter options since they are needed
    1605  		// only to initialize the Block decoder.
    1606  		lzma_filters_free(coder->filters, allocator);
    1607  		coder->block_options.filters = NULL;
    1608  
    1609  		// Check if Block decoder initialization succeeded.
    1610  		if (ret != LZMA_OK)
    1611  			return ret;
    1612  
    1613  		// Make the memory usage visible to _memconfig().
    1614  		coder->mem_direct_mode = coder->mem_next_filters;
    1615  
    1616  		coder->sequence = SEQ_BLOCK_DIRECT_RUN;
    1617  	}
    1618  
    1619  	// Fall through
    1620  
    1621  	case SEQ_BLOCK_DIRECT_RUN: {
    1622  		const size_t in_old = *in_pos;
    1623  		const size_t out_old = *out_pos;
    1624  		const lzma_ret ret = coder->block_decoder.code(
    1625  				coder->block_decoder.coder, allocator,
    1626  				in, in_pos, in_size, out, out_pos, out_size,
    1627  				action);
    1628  		coder->progress_in += *in_pos - in_old;
    1629  		coder->progress_out += *out_pos - out_old;
    1630  
    1631  		if (ret != LZMA_STREAM_END)
    1632  			return ret;
    1633  
    1634  		// Block decoded successfully. Add the new size pair to
    1635  		// the Index hash.
    1636  		return_if_error(lzma_index_hash_append(coder->index_hash,
    1637  				lzma_block_unpadded_size(
    1638  					&coder->block_options),
    1639  				coder->block_options.uncompressed_size));
    1640  
    1641  		coder->sequence = SEQ_BLOCK_HEADER;
    1642  		break;
    1643  	}
    1644  
    1645  	case SEQ_INDEX_WAIT_OUTPUT:
    1646  		// Flush the output from all worker threads so that we can
    1647  		// decode the Index without thinking about threading.
    1648  		return_if_error(read_output_and_wait(coder, allocator,
    1649  				out, out_pos, out_size,
    1650  				NULL, true, &wait_abs, &has_blocked));
    1651  
    1652  		if (!lzma_outq_is_empty(&coder->outq))
    1653  			return LZMA_OK;
    1654  
    1655  		coder->sequence = SEQ_INDEX_DECODE;
    1656  
    1657  	// Fall through
    1658  
    1659  	case SEQ_INDEX_DECODE: {
    1660  		// If we don't have any input, don't call
    1661  		// lzma_index_hash_decode() since it would return
    1662  		// LZMA_BUF_ERROR, which we must not do here.
    1663  		if (*in_pos >= in_size)
    1664  			return LZMA_OK;
    1665  
    1666  		// Decode the Index and compare it to the hash calculated
    1667  		// from the sizes of the Blocks (if any).
    1668  		const size_t in_old = *in_pos;
    1669  		const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
    1670  				in, in_pos, in_size);
    1671  		coder->progress_in += *in_pos - in_old;
    1672  		if (ret != LZMA_STREAM_END)
    1673  			return ret;
    1674  
    1675  		coder->sequence = SEQ_STREAM_FOOTER;
    1676  	}
    1677  
    1678  	// Fall through
    1679  
    1680  	case SEQ_STREAM_FOOTER: {
    1681  		// Copy the Stream Footer to the internal buffer.
    1682  		const size_t in_old = *in_pos;
    1683  		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
    1684  				LZMA_STREAM_HEADER_SIZE);
    1685  		coder->progress_in += *in_pos - in_old;
    1686  
    1687  		// Return if we didn't get the whole Stream Footer yet.
    1688  		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
    1689  			return LZMA_OK;
    1690  
    1691  		coder->pos = 0;
    1692  
    1693  		// Decode the Stream Footer. The decoder gives
    1694  		// LZMA_FORMAT_ERROR if the magic bytes don't match,
    1695  		// so convert that return code to LZMA_DATA_ERROR.
    1696  		lzma_stream_flags footer_flags;
    1697  		const lzma_ret ret = lzma_stream_footer_decode(
    1698  				&footer_flags, coder->buffer);
    1699  		if (ret != LZMA_OK)
    1700  			return ret == LZMA_FORMAT_ERROR
    1701  					? LZMA_DATA_ERROR : ret;
    1702  
    1703  		// Check that Index Size stored in the Stream Footer matches
    1704  		// the real size of the Index field.
    1705  		if (lzma_index_hash_size(coder->index_hash)
    1706  				!= footer_flags.backward_size)
    1707  			return LZMA_DATA_ERROR;
    1708  
    1709  		// Compare that the Stream Flags fields are identical in
    1710  		// both Stream Header and Stream Footer.
    1711  		return_if_error(lzma_stream_flags_compare(
    1712  				&coder->stream_flags, &footer_flags));
    1713  
    1714  		if (!coder->concatenated)
    1715  			return LZMA_STREAM_END;
    1716  
    1717  		coder->sequence = SEQ_STREAM_PADDING;
    1718  	}
    1719  
    1720  	// Fall through
    1721  
    1722  	case SEQ_STREAM_PADDING:
    1723  		assert(coder->concatenated);
    1724  
    1725  		// Skip over possible Stream Padding.
    1726  		while (true) {
    1727  			if (*in_pos >= in_size) {
    1728  				// Unless LZMA_FINISH was used, we cannot
    1729  				// know if there's more input coming later.
    1730  				if (action != LZMA_FINISH)
    1731  					return LZMA_OK;
    1732  
    1733  				// Stream Padding must be a multiple of
    1734  				// four bytes.
    1735  				return coder->pos == 0
    1736  						? LZMA_STREAM_END
    1737  						: LZMA_DATA_ERROR;
    1738  			}
    1739  
    1740  			// If the byte is not zero, it probably indicates the
    1741  			// beginning of a new Stream (or the file is corrupt).
    1742  			if (in[*in_pos] != 0x00)
    1743  				break;
    1744  
    1745  			++*in_pos;
    1746  			++coder->progress_in;
    1747  			coder->pos = (coder->pos + 1) & 3;
    1748  		}
    1749  
    1750  		// Stream Padding must be a multiple of four bytes (empty
    1751  		// Stream Padding is OK).
    1752  		if (coder->pos != 0) {
    1753  			++*in_pos;
    1754  			++coder->progress_in;
    1755  			return LZMA_DATA_ERROR;
    1756  		}
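          		// Example with made-up numbers (not from the original
          		// source): after skipping six padding bytes coder->pos
          		// is 6 mod 4 = 2, so if the padding ends there the data
          		// is rejected with LZMA_DATA_ERROR above; after eight
          		// padding bytes coder->pos is 0 and the padding is valid.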
    1757  
    1758  		// Prepare to decode the next Stream.
    1759  		return_if_error(stream_decoder_reset(coder, allocator));
    1760  		break;
    1761  
    1762  	case SEQ_ERROR:
    1763  		if (!coder->fail_fast) {
    1764  			// Let the application get all data before the point
    1765  			// where the error was detected. This matches the
    1766  			// behavior of single-threaded use.
    1767  			//
    1768  			// FIXME? Some errors (LZMA_MEM_ERROR) don't get here;
    1769  			// they are returned immediately. Thus in rare cases
    1770  			// the output will be less than in the single-threaded
    1771  			// mode. Maybe this doesn't matter much in practice.
    1772  			return_if_error(read_output_and_wait(coder, allocator,
    1773  					out, out_pos, out_size,
    1774  					NULL, true, &wait_abs, &has_blocked));
    1775  
    1776  			// We get here only if the error happened in the main
    1777  			// thread, for example, unsupported Block Header.
    1778  			if (!lzma_outq_is_empty(&coder->outq))
    1779  				return LZMA_OK;
    1780  		}
    1781  
    1782  		// We only get here if no errors were detected by the worker
    1783  		// threads. Errors from worker threads would have already been
    1784  		// returned by the call to read_output_and_wait() above.
    1785  		return coder->pending_error;
    1786  
    1787  	default:
    1788  		assert(0);
    1789  		return LZMA_PROG_ERROR;
    1790  	}
    1791  
    1792  	// Never reached
    1793  }
    1794  
    1795  
    1796  static void
    1797  stream_decoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
    1798  {
    1799  	struct lzma_stream_coder *coder = coder_ptr;
    1800  
    1801  	threads_end(coder, allocator);
    1802  	lzma_outq_end(&coder->outq, allocator);
    1803  
    1804  	lzma_next_end(&coder->block_decoder, allocator);
    1805  	lzma_filters_free(coder->filters, allocator);
    1806  	lzma_index_hash_end(coder->index_hash, allocator);
    1807  
    1808  	lzma_free(coder, allocator);
    1809  	return;
    1810  }
    1811  
    1812  
    1813  static lzma_check
    1814  stream_decoder_mt_get_check(const void *coder_ptr)
    1815  {
    1816  	const struct lzma_stream_coder *coder = coder_ptr;
    1817  	return coder->stream_flags.check;
    1818  }
    1819  
    1820  
    1821  static lzma_ret
    1822  stream_decoder_mt_memconfig(void *coder_ptr, uint64_t *memusage,
    1823  		uint64_t *old_memlimit, uint64_t new_memlimit)
    1824  {
    1825  	// NOTE: This function gets/sets memlimit_stop. For now,
    1826  	// memlimit_threading cannot be modified after initialization.
    1827  	//
    1828  	// *memusage will include cached memory too. Excluding cached memory
    1829  	// would be misleading, and it wouldn't help applications know
    1830  	// how much memory is actually needed to decompress the file,
    1831  	// because the higher the number of threads and the memlimits are,
    1832  	// the more memory the decoder may use.
    1833  	//
    1834  	// Setting a new limit includes the cached memory too, and too low
    1835  	// limits will be rejected. An alternative would be to free the
    1836  	// cached memory immediately if that helps to get below the new
    1837  	// limit, but the current way is the simplest. It's unlikely that
    1838  	// the limit needs to be lowered in the middle of a file anyway;
    1839  	// the typical reason to want a new limit is to increase it after
    1840  	// LZMA_MEMLIMIT_ERROR, and even such use isn't common.
    1841  	struct lzma_stream_coder *coder = coder_ptr;
    1842  
    1843  	mythread_sync(coder->mutex) {
    1844  		*memusage = coder->mem_direct_mode
    1845  				+ coder->mem_in_use
    1846  				+ coder->mem_cached
    1847  				+ coder->outq.mem_allocated;
    1848  	}
    1849  
    1850  	// If no filter chains are allocated, *memusage may be zero.
    1851  	// Always return at least LZMA_MEMUSAGE_BASE.
    1852  	if (*memusage < LZMA_MEMUSAGE_BASE)
    1853  		*memusage = LZMA_MEMUSAGE_BASE;
    1854  
    1855  	*old_memlimit = coder->memlimit_stop;
    1856  
    1857  	if (new_memlimit != 0) {
    1858  		if (new_memlimit < *memusage)
    1859  			return LZMA_MEMLIMIT_ERROR;
    1860  
    1861  		coder->memlimit_stop = new_memlimit;
    1862  	}
    1863  
    1864  	return LZMA_OK;
    1865  }
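
          // A minimal sketch (not part of the original source) of how an
          // application reaches this function through liblzma's public API,
          // for example after lzma_code() has returned LZMA_MEMLIMIT_ERROR;
          // the 1 GiB value below is only an example:
          //
          //     uint64_t memusage = lzma_memusage(&strm);
          //     uint64_t limit = lzma_memlimit_get(&strm);
          //     if (memusage > limit) {
          //             // Raise memlimit_stop, then keep calling lzma_code().
          //             lzma_ret r = lzma_memlimit_set(&strm,
          //                     UINT64_C(1) << 30);
          //             // (Check r; LZMA_MEMLIMIT_ERROR means the new limit
          //             // is below the current memory usage.)
          //     }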
    1866  
    1867  
    1868  static void
    1869  stream_decoder_mt_get_progress(void *coder_ptr,
    1870  		uint64_t *progress_in, uint64_t *progress_out)
    1871  {
    1872  	struct lzma_stream_coder *coder = coder_ptr;
    1873  
    1874  	// Lock coder->mutex to prevent finishing threads from moving their
    1875  	// progress info from the worker_thread structure to lzma_stream_coder.
    1876  	mythread_sync(coder->mutex) {
    1877  		*progress_in = coder->progress_in;
    1878  		*progress_out = coder->progress_out;
    1879  
    1880  		for (size_t i = 0; i < coder->threads_initialized; ++i) {
    1881  			mythread_sync(coder->threads[i].mutex) {
    1882  				*progress_in += coder->threads[i].progress_in;
    1883  				*progress_out += coder->threads[i]
    1884  						.progress_out;
    1885  			}
    1886  		}
    1887  	}
    1888  
    1889  	return;
    1890  }
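
          // A small usage sketch (not part of the original source): an
          // application can read the combined progress counters that this
          // function fills in via the public API, for example from a
          // periodic progress indicator:
          //
          //     uint64_t progress_in;
          //     uint64_t progress_out;
          //     lzma_get_progress(&strm, &progress_in, &progress_out);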
    1891  
    1892  
    1893  static lzma_ret
    1894  stream_decoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
    1895  		       const lzma_mt *options)
    1896  {
    1897  	struct lzma_stream_coder *coder;
    1898  
    1899  	if (options->threads == 0 || options->threads > LZMA_THREADS_MAX)
    1900  		return LZMA_OPTIONS_ERROR;
    1901  
    1902  	if (options->flags & ~LZMA_SUPPORTED_FLAGS)
    1903  		return LZMA_OPTIONS_ERROR;
    1904  
    1905  	lzma_next_coder_init(&stream_decoder_mt_init, next, allocator);
    1906  
    1907  	coder = next->coder;
    1908  	if (!coder) {
    1909  		coder = lzma_alloc(sizeof(struct lzma_stream_coder), allocator);
    1910  		if (coder == NULL)
    1911  			return LZMA_MEM_ERROR;
    1912  
    1913  		next->coder = coder;
    1914  
    1915  		if (mythread_mutex_init(&coder->mutex)) {
    1916  			lzma_free(coder, allocator);
    1917  			return LZMA_MEM_ERROR;
    1918  		}
    1919  
    1920  		if (mythread_cond_init(&coder->cond)) {
    1921  			mythread_mutex_destroy(&coder->mutex);
    1922  			lzma_free(coder, allocator);
    1923  			return LZMA_MEM_ERROR;
    1924  		}
    1925  
    1926  		next->code = &stream_decode_mt;
    1927  		next->end = &stream_decoder_mt_end;
    1928  		next->get_check = &stream_decoder_mt_get_check;
    1929  		next->memconfig = &stream_decoder_mt_memconfig;
    1930  		next->get_progress = &stream_decoder_mt_get_progress;
    1931  
    1932  		coder->filters[0].id = LZMA_VLI_UNKNOWN;
    1933  		memzero(&coder->outq, sizeof(coder->outq));
    1934  
    1935  		coder->block_decoder = LZMA_NEXT_CODER_INIT;
    1936  		coder->mem_direct_mode = 0;
    1937  
    1938  		coder->index_hash = NULL;
    1939  		coder->threads = NULL;
    1940  		coder->threads_free = NULL;
    1941  		coder->threads_initialized = 0;
    1942  	}
    1943  
    1944  	// Clean up the old filter chain if one remains after unfinished
    1945  	// decoding of a previous Stream.
    1946  	lzma_filters_free(coder->filters, allocator);
    1947  
    1948  	// By allocating threads from scratch we can start memory-usage
    1949  	// accounting from scratch, too. Changes in filter and block sizes may
    1950  	// affect the number of threads.
    1951  	//
    1952  	// FIXME? Reusing should be easy, but unlike in the single-threaded
    1953  	// decoder, with some combinations of input files reusing could
    1954  	// leave quite a lot of memory allocated but unused (the first
    1955  	// file could allocate a lot, the next files could use fewer
    1956  	// threads, and some allocations from the first file would not get
    1957  	// freed unless memlimit_threading forces us to clear caches).
    1958  	//
    1959  	// NOTE: The direct mode decoder isn't freed here if one exists.
    1960  	// It will be reused or freed as needed in the main loop.
    1961  	threads_end(coder, allocator);
    1962  
    1963  	// All memusage counters start at 0 (including mem_direct_mode).
    1964  	// The little extra that is needed for the structs in this file
    1965  	// gets accounted for well enough by the filter chain memory usage,
    1966  	// which adds LZMA_MEMUSAGE_BASE for each chain. However,
    1967  	// stream_decoder_mt_memconfig() has to handle this specially so that
    1968  	// it will never return less than LZMA_MEMUSAGE_BASE as memory usage.
    1969  	coder->mem_in_use = 0;
    1970  	coder->mem_cached = 0;
    1971  	coder->mem_next_block = 0;
    1972  
    1973  	coder->progress_in = 0;
    1974  	coder->progress_out = 0;
    1975  
    1976  	coder->sequence = SEQ_STREAM_HEADER;
    1977  	coder->thread_error = LZMA_OK;
    1978  	coder->pending_error = LZMA_OK;
    1979  	coder->thr = NULL;
    1980  
    1981  	coder->timeout = options->timeout;
    1982  
    1983  	coder->memlimit_threading = my_max(1, options->memlimit_threading);
    1984  	coder->memlimit_stop = my_max(1, options->memlimit_stop);
    1985  	if (coder->memlimit_threading > coder->memlimit_stop)
    1986  		coder->memlimit_threading = coder->memlimit_stop;
    1987  
    1988  	coder->tell_no_check = (options->flags & LZMA_TELL_NO_CHECK) != 0;
    1989  	coder->tell_unsupported_check
    1990  			= (options->flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
    1991  	coder->tell_any_check = (options->flags & LZMA_TELL_ANY_CHECK) != 0;
    1992  	coder->ignore_check = (options->flags & LZMA_IGNORE_CHECK) != 0;
    1993  	coder->concatenated = (options->flags & LZMA_CONCATENATED) != 0;
    1994  	coder->fail_fast = (options->flags & LZMA_FAIL_FAST) != 0;
    1995  
    1996  	coder->first_stream = true;
    1997  	coder->out_was_filled = false;
    1998  	coder->pos = 0;
    1999  
    2000  	coder->threads_max = options->threads;
    2001  
    2002  	return_if_error(lzma_outq_init(&coder->outq, allocator,
    2003  				       coder->threads_max));
    2004  
    2005  	return stream_decoder_reset(coder, allocator);
    2006  }
    2007  
    2008  
    2009  extern LZMA_API(lzma_ret)
    2010  lzma_stream_decoder_mt(lzma_stream *strm, const lzma_mt *options)
    2011  {
    2012  	lzma_next_strm_init(stream_decoder_mt_init, strm, options);
    2013  
    2014  	strm->internal->supported_actions[LZMA_RUN] = true;
    2015  	strm->internal->supported_actions[LZMA_FINISH] = true;
    2016  
    2017  	return LZMA_OK;
    2018  }
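
          // A minimal usage sketch (not part of the original source) showing
          // how an application might set up this threaded decoder; the field
          // values are only examples and error handling is abbreviated:
          //
          //     lzma_stream strm = LZMA_STREAM_INIT;
          //     lzma_mt mt = {
          //             .flags = LZMA_CONCATENATED,
          //             .threads = lzma_cputhreads(),
          //             .timeout = 300,
          //             .memlimit_threading = lzma_physmem() / 4,
          //             .memlimit_stop = UINT64_MAX,
          //     };
          //
          //     if (mt.threads == 0)
          //             mt.threads = 1;
          //
          //     lzma_ret ret = lzma_stream_decoder_mt(&strm, &mt);
          //     if (ret != LZMA_OK) {
          //             // Initialization failed, e.g. LZMA_MEM_ERROR or
          //             // LZMA_OPTIONS_ERROR; don't call lzma_code().
          //     }
          //
          //     // From here on, decoding works like with the single-threaded
          //     // lzma_stream_decoder(): set strm.next_in/avail_in and
          //     // strm.next_out/avail_out, call lzma_code() with LZMA_RUN
          //     // and finally LZMA_FINISH, and call lzma_end() at the end.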