# Author: Travis Oliphant
# 1999 -- 2002

from __future__ import division, print_function, absolute_import

import operator
import threading
import sys
import timeit

from scipy.spatial import cKDTree
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from scipy.fftpack.helper import _init_nd_shape_and_axes_sorted
from numpy import (allclose, angle, arange, argsort, array, asarray,
                   atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
                   iscomplexobj, mean, ndarray, newaxis, ones, pi,
                   poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
                   product, r_, ravel, real_if_close, reshape,
                   roots, sort, take, transpose, unique, where, zeros,
                   zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin

if sys.version_info.major >= 3 and sys.version_info.minor >= 5:
    from math import gcd
else:
    from fractions import gcd


__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
           'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
           'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
           'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
           'residuez', 'resample', 'resample_poly', 'detrend',
           'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
           'filtfilt', 'decimate', 'vectorstrength']


_modedict = {'valid': 0, 'same': 1, 'full': 2}

_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
                 'symmetric': 1, 'reflect': 4}

_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')

_rfft_lock = threading.Lock()


def _valfrommode(mode):
    try:
        return _modedict[mode]
    except KeyError:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.")


def _bvalfromboundary(boundary):
    try:
        return _boundarydict[boundary] << 2
    except KeyError:
        raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
                         "(or 'wrap'), and 'symmetric' (or 'symm').")


def _inputs_swap_needed(mode, shape1, shape2):
    """
    If in 'valid' mode, returns whether or not the input arrays need to be
    swapped depending on whether `shape1` is at least as large as `shape2` in
    every dimension.

    This is important for some of the correlation and convolution
    implementations in this module, where the larger array input needs to come
    before the smaller array input when operating in this mode.

    Note that if the mode provided is not 'valid', False is immediately
    returned.
    """
    if mode == 'valid':
        ok1, ok2 = True, True

        for d1, d2 in zip(shape1, shape2):
            if not d1 >= d2:
                ok1 = False
            if not d2 >= d1:
                ok2 = False

        if not (ok1 or ok2):
            raise ValueError("For 'valid' mode, one must be at least "
                             "as large as the other in every dimension")

        return not ok1

    return False
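
# A quick illustration of the swap rule implemented above (module-private
# helper; the shapes here are arbitrary and purely illustrative):
#
#   >>> _inputs_swap_needed('valid', (10, 10), (3, 3))  # in1 already largest
#   False
#   >>> _inputs_swap_needed('valid', (3, 3), (10, 10))  # in2 larger -> swap
#   True
#   >>> _inputs_swap_needed('same', (3, 3), (10, 10))   # only 'valid' swaps
#   False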


def correlate(in1, in2, mode='full', method='auto'):
    r"""
    Cross-correlate two N-dimensional arrays.

    Cross-correlate `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear cross-correlation
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the correlation.

        ``direct``
           The correlation is determined directly from sums, the definition of
           correlation.
        ``fft``
           The Fast Fourier Transform is used to perform the correlation more
           quickly (only available for numerical arrays.)
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).  See `convolve` Notes for more detail.

           .. versionadded:: 0.19.0

    Returns
    -------
    correlate : array
        An N-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.

    See Also
    --------
    choose_conv_method : contains more documentation on `method`.

    Notes
    -----
    The correlation z of two d-dimensional arrays x and y is defined as::

        z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])

    This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
    then

    .. math::

          z[k] = (x * y)(k - N + 1)
               = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}

    for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`

    where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
    and :math:`y_m` is 0 when m is outside the range of y.

    ``method='fft'`` only works for numerical arrays as it relies on
    `fftconvolve`. In certain cases (i.e., arrays of objects or when
    rounding integers can lose precision), ``method='direct'`` is always used.

    Examples
    --------
    Implement a matched filter using cross-correlation, to recover a signal
    that has passed through a noisy channel.

    >>> from scipy import signal
    >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
    >>> sig_noise = sig + np.random.randn(len(sig))
    >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128

    >>> import matplotlib.pyplot as plt
    >>> clock = np.arange(64, len(sig), 128)
    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(sig)
    >>> ax_orig.plot(clock, sig[clock], 'ro')
    >>> ax_orig.set_title('Original signal')
    >>> ax_noise.plot(sig_noise)
    >>> ax_noise.set_title('Signal with noise')
    >>> ax_corr.plot(corr)
    >>> ax_corr.plot(clock, corr[clock], 'ro')
    >>> ax_corr.axhline(0.5, ls=':')
    >>> ax_corr.set_title('Cross-correlated with rectangular pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()

    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    if in1.ndim == in2.ndim == 0:
        return in1 * in2.conj()
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")

    # Don't use _valfrommode, since correlate should not accept numeric modes
    try:
        val = _modedict[mode]
    except KeyError:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.")

    # this either calls fftconvolve or this function with method=='direct'
    if method in ('fft', 'auto'):
        return convolve(in1, _reverse_and_conj(in2), mode, method)

    elif method == 'direct':
        # fastpath to faster numpy.correlate for 1d inputs when possible
        if _np_conv_ok(in1, in2, mode):
            return np.correlate(in1, in2, mode)

        # _correlateND is far slower when in2.size > in1.size, so swap them
        # and then undo the effect afterward if mode == 'full'.  Also, it fails
        # with 'valid' mode if in2 is larger than in1, so swap those, too.
        # Don't swap inputs for 'same' mode, since shape of in1 matters.
        swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
                          _inputs_swap_needed(mode, in1.shape, in2.shape))

        if swapped_inputs:
            in1, in2 = in2, in1

        if mode == 'valid':
            ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
            out = np.empty(ps, in1.dtype)

            z = sigtools._correlateND(in1, in2, out, val)

        else:
            ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]

            # zero pad input
            in1zpadded = np.zeros(ps, in1.dtype)
            sc = tuple(slice(0, i) for i in in1.shape)
            in1zpadded[sc] = in1.copy()

            if mode == 'full':
                out = np.empty(ps, in1.dtype)
            elif mode == 'same':
                out = np.empty(in1.shape, in1.dtype)

            z = sigtools._correlateND(in1zpadded, in2, out, val)

        if swapped_inputs:
            # Reverse and conjugate to undo the effect of swapping inputs
            z = _reverse_and_conj(z)

        return z

    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")


def _centered(arr, newshape):
    # Return the center newshape portion of the array.
    newshape = asarray(newshape)
    currshape = array(arr.shape)
    startind = (currshape - newshape) // 2
    endind = startind + newshape
    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
    return arr[tuple(myslice)]
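
# Illustrative sketch of how `_centered` trims a 'full' result: convolving
# inputs of length 5 and 3 gives 7 output points, and the 'same'-sized
# centre of length 5 is recovered like so:
#
#   >>> _centered(np.arange(7), (5,))
#   array([1, 2, 3, 4, 5])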


def fftconvolve(in1, in2, mode="full", axes=None):
    """Convolve two N-dimensional arrays using FFT.

    Convolve `in1` and `in2` using the fast Fourier transform method, with
    the output size determined by the `mode` argument.

    This is generally much faster than `convolve` for large arrays (n > ~500),
    but can be slower when only a few output values are needed, and can only
    output float arrays (int or object array inputs will be cast to float).

    As of v0.19, `convolve` automatically chooses this method or the direct
    method based on an estimation of which is faster.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    axes : int or array_like of ints or None, optional
        Axes over which to compute the convolution.
        The default is over all axes.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    Examples
    --------
    Autocorrelation of white noise is an impulse.

    >>> from scipy import signal
    >>> sig = np.random.randn(1000)
    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
    >>> ax_mag.set_title('Autocorrelation')
    >>> fig.tight_layout()
    >>> fig.show()

    Gaussian blur implemented using FFT convolution.  Notice the dark borders
    around the image, due to the zero-padding beyond its boundaries.
    The `convolve2d` function allows for other types of image boundaries,
    but is far slower.

    >>> from scipy import misc
    >>> face = misc.face(gray=True)
    >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
    >>> blurred = signal.fftconvolve(face, kernel, mode='same')

    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
    ...                                                      figsize=(6, 15))
    >>> ax_orig.imshow(face, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_kernel.imshow(kernel, cmap='gray')
    >>> ax_kernel.set_title('Gaussian kernel')
    >>> ax_kernel.set_axis_off()
    >>> ax_blurred.imshow(blurred, cmap='gray')
    >>> ax_blurred.set_title('Blurred')
    >>> ax_blurred.set_axis_off()
    >>> fig.show()

    """
    in1 = asarray(in1)
    in2 = asarray(in2)
    noaxes = axes is None

    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    _, axes = _init_nd_shape_and_axes_sorted(in1, shape=None, axes=axes)

    if not noaxes and not axes.size:
        raise ValueError("when provided, axes cannot be empty")

    if noaxes:
        other_axes = array([], dtype=np.intc)
    else:
        other_axes = np.setdiff1d(np.arange(in1.ndim), axes)

    s1 = array(in1.shape)
    s2 = array(in2.shape)

    if not np.all((s1[other_axes] == s2[other_axes])
                  | (s1[other_axes] == 1) | (s2[other_axes] == 1)):
        raise ValueError("incompatible shapes for in1 and in2:"
                         " {0} and {1}".format(in1.shape, in2.shape))

    complex_result = (np.issubdtype(in1.dtype, np.complexfloating)
                      or np.issubdtype(in2.dtype, np.complexfloating))
    shape = np.maximum(s1, s2)
    shape[axes] = s1[axes] + s2[axes] - 1

    # Check that input sizes are compatible with 'valid' mode
    if _inputs_swap_needed(mode, s1, s2):
        # Convolution is commutative; order doesn't have any effect on output
        in1, s1, in2, s2 = in2, s2, in1, s1

    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [fftpack.helper.next_fast_len(d) for d in shape[axes]]
    fslice = tuple([slice(sz) for sz in shape])
    # Pre-1.9 NumPy FFT routines are not threadsafe.  For older NumPys, make
    # sure we only call rfftn/irfftn from one thread at a time.
    if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
        try:
            sp1 = np.fft.rfftn(in1, fshape, axes=axes)
            sp2 = np.fft.rfftn(in2, fshape, axes=axes)
            ret = np.fft.irfftn(sp1 * sp2, fshape, axes=axes)[fslice].copy()
        finally:
            if not _rfft_mt_safe:
                _rfft_lock.release()
    else:
        # If we're here, it's either because we need a complex result, or we
        # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
        # is already in use by another thread).  In either case, use the
        # (threadsafe but slower) SciPy complex-FFT routines instead.
        sp1 = fftpack.fftn(in1, fshape, axes=axes)
        sp2 = fftpack.fftn(in2, fshape, axes=axes)
        ret = fftpack.ifftn(sp1 * sp2, axes=axes)[fslice].copy()
        if not complex_result:
            ret = ret.real

    if mode == "full":
        return ret
    elif mode == "same":
        return _centered(ret, s1)
    elif mode == "valid":
        shape_valid = shape.copy()
        shape_valid[axes] = s1[axes] - s2[axes] + 1
        return _centered(ret, shape_valid)
    else:
        raise ValueError("acceptable mode flags are 'valid',"
                         " 'same', or 'full'")


def _numeric_arrays(arrays, kinds='buifc'):
    """
    See if a list of arrays are all numeric.

    Parameters
    ----------
    arrays : ndarray or list of ndarrays
        Arrays to check if numeric.
    kinds : string-like
        The dtype kinds of the arrays to be checked. If the dtype.kind of
        any of the arrays is not in this string, the function returns False;
        otherwise it returns True.
    """
    if type(arrays) == ndarray:
        return arrays.dtype.kind in kinds
    for array_ in arrays:
        if array_.dtype.kind not in kinds:
            return False
    return True


def _prod(iterable):
    """
    Product of a list of numbers.
    Faster than np.prod for short lists like array shapes.
    """
    product = 1
    for x in iterable:
        product *= x
    return product


def _fftconv_faster(x, h, mode):
    """
    See if using `fftconvolve` or `_correlateND` is faster. The boolean value
    returned depends on the sizes and shapes of the input values.

    The big O ratios were found to hold across different machines, which makes
    sense as it's the ratio that matters (the effective speed of the computer
    is found in both big O constants). Regardless, this had been tuned on an
    early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
    """
    if mode == 'full':
        out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
        big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
    elif mode == 'same':
        out_shape = x.shape
        if x.ndim == 1:
            if h.size <= x.size:
                big_O_constant = 7183.41306773
            else:
                big_O_constant = 856.78174111
        else:
            big_O_constant = 34519.21021589
    elif mode == 'valid':
        out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
        big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
    else:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.")

    # see whether the Fourier transform convolution method or the direct
    # convolution method is faster (discussed in scikit-image PR #1792)
    direct_time = (x.size * h.size * _prod(out_shape))
    fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
                                             tuple(out_shape)))
    return big_O_constant * fft_time < direct_time
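
# Rough worked example of the heuristic above for two 1-D float arrays of
# length 1000 in 'full' mode (order-of-magnitude arithmetic only, using the
# constants tuned above):
#
#   direct_time ~ 1000 * 1000 * 1999                        ~ 2.0e9
#   fft_time    ~ 1000*ln(1000) + 1000*ln(1000) + 1999*ln(1999)
#               ~ 2.9e4
#   big_O_constant * fft_time ~ 1.1e4 * 2.9e4               ~ 3.2e8 < 2.0e9
#
# so the function returns True and the FFT path is predicted to win.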


def _reverse_and_conj(x):
    """
    Reverse array `x` in all dimensions and perform the complex conjugate
    """
    reverse = (slice(None, None, -1),) * x.ndim
    return x[reverse].conj()


def _np_conv_ok(volume, kernel, mode):
    """
    See if numpy supports convolution of `volume` and `kernel` (i.e. both are
    1D ndarrays and of the appropriate shape).  Numpy's 'same' mode uses the
    size of the larger input, while Scipy's uses the size of the first input.

    Invalid mode strings will return False and be caught by the calling func.
    """
    if volume.ndim == kernel.ndim == 1:
        if mode in ('full', 'valid'):
            return True
        elif mode == 'same':
            return volume.size >= kernel.size
    else:
        return False
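
# Illustrative sketch of the 'same'-mode mismatch guarded against above:
# numpy sizes the output by the longer input, scipy by the first input, so
# the numpy fastpath is only taken when `volume` is at least as long as
# `kernel`.
#
#   >>> len(np.convolve(np.ones(3), np.ones(5), mode='same'))
#   5
#   >>> len(convolve(np.ones(3), np.ones(5), mode='same'))  # this module
#   3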


def _timeit_fast(stmt="pass", setup="pass", repeat=3):
    """
    Returns the time the statement/function took, in seconds.

    Faster, less precise version of IPython's timeit. `stmt` can be a
    statement written as a string or a callable.

    Will do only 1 loop (like IPython's timeit) with no repetitions
    (unlike IPython) for very slow functions.  For fast functions, only does
    enough loops to take 5 ms, which seems to produce similar results (on
    Windows at least), and avoids doing an extraneous cycle that isn't
    measured.
    """
    timer = timeit.Timer(stmt, setup)

    # determine number of calls per rep so total time for 1 rep >= 5 ms
    x = 0
    for p in range(0, 10):
        number = 10**p
        x = timer.timeit(number)  # seconds
        if x >= 5e-3 / 10:  # 5 ms for final test, 1/10th that for this one
            break
    if x > 1:  # second
        # If it's macroscopic, don't bother with repetitions
        best = x
    else:
        number *= 10
        r = timer.repeat(repeat, number)
        best = min(r)

    sec = best / number
    return sec


def choose_conv_method(in1, in2, mode='full', measure=False):
    """
    Find the fastest convolution/correlation method.

    This primarily exists to be called during the ``method='auto'`` option in
    `convolve` and `correlate`, but can also be used when performing many
    convolutions of the same input shapes and dtypes, determining
    which method to use for all of them, either to avoid the overhead of the
    'auto' option or to use accurate real-world measurements.

    Parameters
    ----------
    in1 : array_like
        The first argument passed into the convolution function.
    in2 : array_like
        The second argument passed into the convolution function.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    measure : bool, optional
        If True, run and time the convolution of `in1` and `in2` with both
        methods and return the fastest. If False (default), predict the
        fastest method using precomputed values.

    Returns
    -------
    method : str
        A string indicating which convolution method is fastest, either
        'direct' or 'fft'
    times : dict, optional
        A dictionary containing the times (in seconds) needed for each method.
        This value is only returned if ``measure=True``.

    See Also
    --------
    convolve
    correlate

    Notes
    -----
    For large n, ``measure=False`` is accurate and can quickly determine the
    fastest method to perform the convolution.  However, this is not as
    accurate for small n (when any dimension in the input or output is small).

    In practice, we found that this function estimates the faster method up to
    a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
    times slower than the fastest method). The estimation values were tuned on
    an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
    held *fairly* accurately across different machines.

    If ``measure=True``, time the convolutions. Because this function uses
    `fftconvolve`, an error will be thrown if it does not support the inputs.
    There are cases when `fftconvolve` supports the inputs but this function
    returns `direct` (e.g., to protect against floating point integer
    precision).

    .. versionadded:: 0.19

    Examples
    --------
    Estimate the fastest method for a given input:

    >>> from scipy import signal
    >>> a = np.random.randn(1000)
    >>> b = np.random.randn(1000000)
    >>> method = signal.choose_conv_method(a, b, mode='same')
    >>> method
    'fft'

    This can then be applied to other arrays of the same dtype and shape:

    >>> c = np.random.randn(1000)
    >>> d = np.random.randn(1000000)
    >>> # `method` works with correlate and convolve
    >>> corr1 = signal.correlate(a, b, mode='same', method=method)
    >>> corr2 = signal.correlate(c, d, mode='same', method=method)
    >>> conv1 = signal.convolve(a, b, mode='same', method=method)
    >>> conv2 = signal.convolve(c, d, mode='same', method=method)

    """
    volume = asarray(in1)
    kernel = asarray(in2)

    if measure:
        times = {}
        for method in ['fft', 'direct']:
            times[method] = _timeit_fast(lambda: convolve(volume, kernel,
                                         mode=mode, method=method))

        chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
        return chosen_method, times

    # fftconvolve doesn't support complex256
    fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
    if hasattr(np, fftconv_unsup):
        if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
            return 'direct'

    # for integer input,
    # catch when more precision required than float provides (representing an
    # integer as float can lose precision in fftconvolve if larger than 2**52)
    if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
        max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
        max_value *= int(min(volume.size, kernel.size))
        if max_value > 2**np.finfo('float').nmant - 1:
            return 'direct'

    if _numeric_arrays([volume, kernel], kinds='b'):
        return 'direct'

    if _numeric_arrays([volume, kernel]):
        if _fftconv_faster(volume, kernel, mode):
            return 'fft'

    return 'direct'
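
# Illustrative sketch of the integer-precision guard above: float64 carries a
# 52-bit mantissa, so once the worst-case accumulated magnitude exceeds
# ``2**np.finfo('float').nmant - 1`` the FFT path could round integer
# results, and 'direct' is forced.
#
#   >>> x = np.full(10, 2**30, dtype=np.int64)
#   >>> choose_conv_method(x, x)   # 2**30 * 2**30 * 10 >> 2**52 - 1
#   'direct'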


def convolve(in1, in2, mode='full', method='auto'):
    """
    Convolve two N-dimensional arrays.

    Convolve `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the convolution.

        ``direct``
           The convolution is determined directly from sums, the definition of
           convolution.
        ``fft``
           The Fourier Transform is used to perform the convolution by calling
           `fftconvolve`.
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).  See Notes for more detail.

           .. versionadded:: 0.19.0

    Returns
    -------
    convolve : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    numpy.polymul : performs polynomial multiplication (same operation, but
                    also accepts poly1d objects)
    choose_conv_method : chooses the fastest appropriate convolution method
    fftconvolve

    Notes
    -----
    By default, `convolve` and `correlate` use ``method='auto'``, which calls
    `choose_conv_method` to choose the fastest method using pre-computed
    values (`choose_conv_method` can also measure real-world timing with a
    keyword argument). Because `fftconvolve` relies on floating point numbers,
    there are certain constraints that may force `method=direct` (more detail
    in `choose_conv_method` docstring).

    Examples
    --------
    Smooth a square pulse using a Hann window:

    >>> from scipy import signal
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> win = signal.hann(50)
    >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('Original pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> ax_win.plot(win)
    >>> ax_win.set_title('Filter impulse response')
    >>> ax_win.margins(0, 0.1)
    >>> ax_filt.plot(filtered)
    >>> ax_filt.set_title('Filtered signal')
    >>> ax_filt.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()

    """
    volume = asarray(in1)
    kernel = asarray(in2)

    if volume.ndim == kernel.ndim == 0:
        return volume * kernel
    elif volume.ndim != kernel.ndim:
        raise ValueError("volume and kernel should have the same "
                         "dimensionality")

    if _inputs_swap_needed(mode, volume.shape, kernel.shape):
        # Convolution is commutative; order doesn't have any effect on output
        volume, kernel = kernel, volume

    if method == 'auto':
        method = choose_conv_method(volume, kernel, mode=mode)

    if method == 'fft':
        out = fftconvolve(volume, kernel, mode=mode)
        result_type = np.result_type(volume, kernel)
        if result_type.kind in {'u', 'i'}:
            out = np.around(out)
        return out.astype(result_type)
    elif method == 'direct':
        # fastpath to faster numpy.convolve for 1d inputs when possible
        if _np_conv_ok(volume, kernel, mode):
            return np.convolve(volume, kernel, mode)

        return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")


def order_filter(a, domain, rank):
    """
    Perform an order filter on an N-dimensional array.

    Perform an order filter on the array in.  The domain argument acts as a
    mask centered over each pixel.  The non-zero elements of domain are
    used to select elements surrounding each input pixel which are placed
    in a list.  The list is sorted, and the output for that pixel is the
    element corresponding to rank in the sorted list.

    Parameters
    ----------
    a : ndarray
        The N-dimensional input array.
    domain : array_like
        A mask array with the same number of dimensions as `a`.
        Each dimension should have an odd number of elements.
    rank : int
        A non-negative integer which selects the element from the
        sorted list (0 corresponds to the smallest element, 1 is the
        next smallest element, etc.).

    Returns
    -------
    out : ndarray
        The results of the order filter in an array with the same
        shape as `a`.

    Examples
    --------
    >>> from scipy import signal
    >>> x = np.arange(25).reshape(5, 5)
    >>> domain = np.identity(3)
    >>> x
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19],
           [20, 21, 22, 23, 24]])
    >>> signal.order_filter(x, domain, 0)
    array([[  0.,   0.,   0.,   0.,   0.],
           [  0.,   0.,   1.,   2.,   0.],
           [  0.,   5.,   6.,   7.,   0.],
           [  0.,  10.,  11.,  12.,   0.],
           [  0.,   0.,   0.,   0.,   0.]])
    >>> signal.order_filter(x, domain, 2)
    array([[  6.,   7.,   8.,   9.,   4.],
           [ 11.,  12.,  13.,  14.,   9.],
           [ 16.,  17.,  18.,  19.,  14.],
           [ 21.,  22.,  23.,  24.,  19.],
           [ 20.,  21.,  22.,  23.,  24.]])

    """
    domain = asarray(domain)
    size = domain.shape
    for k in range(len(size)):
        if (size[k] % 2) != 1:
            raise ValueError("Each dimension of domain argument "
                             " should have an odd number of elements.")
    return sigtools._order_filterND(a, domain, rank)


def medfilt(volume, kernel_size=None):
    """
    Perform a median filter on an N-dimensional array.

    Apply a median filter to the input array using a local window-size
    given by `kernel_size`.

    Parameters
    ----------
    volume : array_like
        An N-dimensional input array.
    kernel_size : array_like, optional
        A scalar or an N-length list giving the size of the median filter
        window in each dimension.  Elements of `kernel_size` should be odd.
        If `kernel_size` is a scalar, then this scalar is used as the size in
        each dimension. Default size is 3 for each dimension.

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.

    """
    volume = atleast_1d(volume)
    if kernel_size is None:
        kernel_size = [3] * volume.ndim
    kernel_size = asarray(kernel_size)
    if kernel_size.shape == ():
        kernel_size = np.repeat(kernel_size.item(), volume.ndim)

    for k in range(volume.ndim):
        if (kernel_size[k] % 2) != 1:
            raise ValueError("Each element of kernel_size should be odd.")

    domain = ones(kernel_size)

    numels = product(kernel_size, axis=0)
    order = numels // 2
    return sigtools._order_filterND(volume, domain, order)
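
# Minimal usage sketch for `medfilt` (assumes the public scipy.signal
# namespace; illustrative only):
#
#   >>> from scipy import signal
#   >>> y = signal.medfilt([0., 0., 10., 0., 0.], kernel_size=3)
#
# `y` is all zeros: the isolated spike is rejected by the size-3 median
# window while the flat background is preserved.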


def wiener(im, mysize=None, noise=None):
    """
    Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or array_like, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension.  Elements of mysize should be odd.
        If mysize is a scalar, then this scalar is used as the size
        in each dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as `im`.

    """
    im = asarray(im)
    if mysize is None:
        mysize = [3] * im.ndim
    mysize = asarray(mysize)
    if mysize.shape == ():
        mysize = np.repeat(mysize.item(), im.ndim)

    # Estimate the local mean
    lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)

    # Estimate the local variance
    lVar = (correlate(im ** 2, ones(mysize), 'same') /
            product(mysize, axis=0) - lMean ** 2)

    # Estimate the noise power if needed.
    if noise is None:
        noise = mean(ravel(lVar), axis=0)

    res = (im - lMean)
    res *= (1 - noise / lVar)
    res += lMean
    out = where(lVar < noise, lMean, res)

    return out
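
# Minimal usage sketch for `wiener` (assumes the public scipy.signal
# namespace; illustrative only): smooth a noisy constant signal with a
# length-5 window.
#
#   >>> from scipy import signal
#   >>> noisy = np.ones(100) + 0.1 * np.random.randn(100)
#   >>> smoothed = signal.wiener(noisy, mysize=5)
#
# Wherever the local variance falls below the estimated noise power the
# filter returns the local mean, so `smoothed` hugs 1.0 more tightly than
# the input does.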


def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """
    Convolve two 2-dimensional arrays.

    Convolve `in1` and `in2` with output size determined by `mode`, and
    boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    boundary : str {'fill', 'wrap', 'symm'}, optional
        A flag indicating how to handle boundaries:

        ``fill``
           pad input arrays with fillvalue. (default)
        ``wrap``
           circular boundary conditions.
        ``symm``
           symmetrical boundary conditions.
    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    out : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    Examples
    --------
    Compute the gradient of an image by 2D convolution with a complex Scharr
    operator.  (Horizontal operator is real, vertical is imaginary.)  Use
    symmetric boundary condition to avoid creating edges at the image
    boundaries.

    >>> from scipy import signal
    >>> from scipy import misc
    >>> ascent = misc.ascent()
    >>> scharr = np.array([[ -3-3j, 0-10j,  +3 -3j],
    ...                    [-10+0j, 0+ 0j, +10 +0j],
    ...                    [ -3+3j, 0+10j,  +3 +3j]]) # Gx + j*Gy
    >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
    >>> ax_orig.imshow(ascent, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_mag.imshow(np.absolute(grad), cmap='gray')
    >>> ax_mag.set_title('Gradient magnitude')
    >>> ax_mag.set_axis_off()
    >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
    >>> ax_ang.set_title('Gradient orientation')
    >>> ax_ang.set_axis_off()
    >>> fig.show()

    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    if not in1.ndim == in2.ndim == 2:
        raise ValueError('convolve2d inputs must both be 2D arrays')

    if _inputs_swap_needed(mode, in1.shape, in2.shape):
        in1, in2 = in2, in1

    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)
    out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
    return out


def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """
    Cross-correlate two 2-dimensional arrays.

    Cross correlate `in1` and `in2` with output size determined by `mode`, and
    boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear cross-correlation
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    boundary : str {'fill', 'wrap', 'symm'}, optional
        A flag indicating how to handle boundaries:

        ``fill``
           pad input arrays with fillvalue. (default)
        ``wrap``
           circular boundary conditions.
        ``symm``
           symmetrical boundary conditions.
    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    correlate2d : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.

    Examples
    --------
    Use 2D cross-correlation to find the location of a template in a noisy
    image:

    >>> from scipy import signal
    >>> from scipy import misc
    >>> face = misc.face(gray=True) - misc.face(gray=True).mean()
    >>> template = np.copy(face[300:365, 670:750])  # right eye
    >>> template -= template.mean()
    >>> face = face + np.random.randn(*face.shape) * 50  # add noise
    >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
    >>> y, x = np.unravel_index(np.argmax(corr), corr.shape)  # find the match

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
    ...                                                     figsize=(6, 15))
    >>> ax_orig.imshow(face, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_template.imshow(template, cmap='gray')
    >>> ax_template.set_title('Template')
    >>> ax_template.set_axis_off()
    >>> ax_corr.imshow(corr, cmap='gray')
    >>> ax_corr.set_title('Cross-correlation')
    >>> ax_corr.set_axis_off()
    >>> ax_orig.plot(x, y, 'ro')
    >>> fig.show()

    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    if not in1.ndim == in2.ndim == 2:
        raise ValueError('correlate2d inputs must both be 2D arrays')

    swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
    if swapped_inputs:
        in1, in2 = in2, in1

    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)
    out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)

    if swapped_inputs:
        out = out[::-1, ::-1]

    return out


def medfilt2d(input, kernel_size=3):
    """
    Median filter a 2-dimensional array.

    Apply a median filter to the `input` array using a local window-size
    given by `kernel_size` (must be odd).

    Parameters
    ----------
    input : array_like
        A 2-dimensional input array.
    kernel_size : array_like, optional
        A scalar or a list of length 2, giving the size of the
        median filter window in each dimension.  Elements of
        `kernel_size` should be odd.  If `kernel_size` is a scalar,
        then this scalar is used as the size in each dimension.
        Default is a kernel of size (3, 3).

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.

    """
    image = asarray(input)
    if kernel_size is None:
        kernel_size = [3] * 2
    kernel_size = asarray(kernel_size)
    if kernel_size.shape == ():
        kernel_size = np.repeat(kernel_size.item(), 2)

    for size in kernel_size:
        if (size % 2) != 1:
            raise ValueError("Each element of kernel_size should be odd.")

    return sigtools._medfilt2d(image, kernel_size)
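
# Minimal usage sketch for `medfilt2d` (assumes the public scipy.signal
# namespace; illustrative only): remove an isolated hot pixel with the
# default 3 x 3 kernel.
#
#   >>> from scipy import signal
#   >>> img = np.zeros((5, 5), dtype=np.float64)
#   >>> img[2, 2] = 255.0
#   >>> clean = signal.medfilt2d(img)
#
# The hot pixel is replaced by the median of its 3 x 3 neighbourhood (zero),
# so `clean` comes back all zeros with the same shape as `img`.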
  977. def lfilter(b, a, x, axis=-1, zi=None):
  978. """
  979. Filter data along one-dimension with an IIR or FIR filter.
  980. Filter a data sequence, `x`, using a digital filter. This works for many
  981. fundamental data types (including Object type). The filter is a direct
  982. form II transposed implementation of the standard difference equation
  983. (see Notes).
  984. Parameters
  985. ----------
  986. b : array_like
  987. The numerator coefficient vector in a 1-D sequence.
  988. a : array_like
  989. The denominator coefficient vector in a 1-D sequence. If ``a[0]``
  990. is not 1, then both `a` and `b` are normalized by ``a[0]``.
  991. x : array_like
  992. An N-dimensional input array.
  993. axis : int, optional
  994. The axis of the input data array along which to apply the
  995. linear filter. The filter is applied to each subarray along
  996. this axis. Default is -1.
  997. zi : array_like, optional
  998. Initial conditions for the filter delays. It is a vector
  999. (or array of vectors for an N-dimensional input) of length
  1000. ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
  1001. initial rest is assumed. See `lfiltic` for more information.
  1002. Returns
  1003. -------
  1004. y : array
  1005. The output of the digital filter.
  1006. zf : array, optional
  1007. If `zi` is None, this is not returned, otherwise, `zf` holds the
  1008. final filter delay values.
  1009. See Also
  1010. --------
  1011. lfiltic : Construct initial conditions for `lfilter`.
  1012. lfilter_zi : Compute initial state (steady state of step response) for
  1013. `lfilter`.
  1014. filtfilt : A forward-backward filter, to obtain a filter with linear phase.
  1015. savgol_filter : A Savitzky-Golay filter.
  1016. sosfilt: Filter data using cascaded second-order sections.
  1017. sosfiltfilt: A forward-backward filter using second-order sections.
  1018. Notes
  1019. -----
  1020. The filter function is implemented as a direct II transposed structure.
  1021. This means that the filter implements::
  1022. a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
  1023. - a[1]*y[n-1] - ... - a[N]*y[n-N]
  1024. where `M` is the degree of the numerator, `N` is the degree of the
  1025. denominator, and `n` is the sample number. It is implemented using
  1026. the following difference equations (assuming M = N)::
  1027. a[0]*y[n] = b[0] * x[n] + d[0][n-1]
  1028. d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
  1029. d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
  1030. ...
  1031. d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
  1032. d[N-1][n] = b[N] * x[n] - a[N] * y[n]
  1033. where `d` are the state variables.
  1034. The rational transfer function describing this filter in the
  1035. z-transform domain is::

                    -1              -M
        b[0] + b[1]z  + ... + b[M] z
Y(z) = -------------------------------- X(z)
                    -1              -N
        a[0] + a[1]z  + ... + a[N] z

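As a quick check of the difference equation above (a small sketch with
arbitrary coefficients), a purely FIR filter, i.e. ``a = [1.0]``, reduces
to a truncated convolution:

>>> import numpy as np
>>> from scipy import signal
>>> bb, xx = [0.5, 0.25, 0.25], np.arange(8.0)
>>> bool(np.allclose(signal.lfilter(bb, [1.0], xx),
...                  np.convolve(bb, xx)[:len(xx)]))
True
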
  1041. Examples
  1042. --------
  1043. Generate a noisy signal to be filtered:
  1044. >>> from scipy import signal
  1045. >>> import matplotlib.pyplot as plt
  1046. >>> t = np.linspace(-1, 1, 201)
  1047. >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
  1048. ... 0.1*np.sin(2*np.pi*1.25*t + 1) +
  1049. ... 0.18*np.cos(2*np.pi*3.85*t))
  1050. >>> xn = x + np.random.randn(len(t)) * 0.08
  1051. Create an order 3 lowpass butterworth filter:
  1052. >>> b, a = signal.butter(3, 0.05)
  1053. Apply the filter to xn. Use lfilter_zi to choose the initial condition of
  1054. the filter:
  1055. >>> zi = signal.lfilter_zi(b, a)
  1056. >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
  1057. Apply the filter again, to have a result filtered at an order the same as
  1058. filtfilt:
  1059. >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
  1060. Use filtfilt to apply the filter:
  1061. >>> y = signal.filtfilt(b, a, xn)
  1062. Plot the original signal and the various filtered versions:
  1063. >>> plt.figure
  1064. >>> plt.plot(t, xn, 'b', alpha=0.75)
  1065. >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
  1066. >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
  1067. ... 'filtfilt'), loc='best')
  1068. >>> plt.grid(True)
  1069. >>> plt.show()
  1070. """
  1071. a = np.atleast_1d(a)
  1072. if len(a) == 1:
  1073. # This path only supports types fdgFDGO to mirror _linear_filter below.
  1074. # Any of b, a, x, or zi can set the dtype, but there is no default
  1075. # casting of other types; instead a NotImplementedError is raised.
  1076. b = np.asarray(b)
  1077. a = np.asarray(a)
  1078. if b.ndim != 1 and a.ndim != 1:
  1079. raise ValueError('object of too small depth for desired array')
  1080. x = np.asarray(x)
  1081. inputs = [b, a, x]
  1082. if zi is not None:
  1083. # _linear_filter does not broadcast zi, but does do expansion of
  1084. # singleton dims.
  1085. zi = np.asarray(zi)
  1086. if zi.ndim != x.ndim:
  1087. raise ValueError('object of too small depth for desired array')
  1088. expected_shape = list(x.shape)
  1089. expected_shape[axis] = b.shape[0] - 1
  1090. expected_shape = tuple(expected_shape)
  1091. # check the trivial case where zi is the right shape first
  1092. if zi.shape != expected_shape:
  1093. strides = zi.ndim * [None]
  1094. if axis < 0:
  1095. axis += zi.ndim
  1096. for k in range(zi.ndim):
  1097. if k == axis and zi.shape[k] == expected_shape[k]:
  1098. strides[k] = zi.strides[k]
  1099. elif k != axis and zi.shape[k] == expected_shape[k]:
  1100. strides[k] = zi.strides[k]
  1101. elif k != axis and zi.shape[k] == 1:
  1102. strides[k] = 0
  1103. else:
  1104. raise ValueError('Unexpected shape for zi: expected '
  1105. '%s, found %s.' %
  1106. (expected_shape, zi.shape))
  1107. zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
  1108. strides)
  1109. inputs.append(zi)
  1110. dtype = np.result_type(*inputs)
  1111. if dtype.char not in 'fdgFDGO':
  1112. raise NotImplementedError("input type '%s' not supported" % dtype)
  1113. b = np.array(b, dtype=dtype)
  1114. a = np.array(a, dtype=dtype, copy=False)
  1115. b /= a[0]
  1116. x = np.array(x, dtype=dtype, copy=False)
  1117. out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
  1118. ind = out_full.ndim * [slice(None)]
  1119. if zi is not None:
  1120. ind[axis] = slice(zi.shape[axis])
  1121. out_full[tuple(ind)] += zi
  1122. ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
  1123. out = out_full[tuple(ind)]
  1124. if zi is None:
  1125. return out
  1126. else:
  1127. ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
  1128. zf = out_full[tuple(ind)]
  1129. return out, zf
  1130. else:
  1131. if zi is None:
  1132. return sigtools._linear_filter(b, a, x, axis)
  1133. else:
  1134. return sigtools._linear_filter(b, a, x, axis, zi)
  1135. def lfiltic(b, a, y, x=None):
  1136. """
  1137. Construct initial conditions for lfilter given input and output vectors.
  1138. Given a linear filter (b, a) and initial conditions on the output `y`
  1139. and the input `x`, return the initial conditions on the state vector zi
  1140. which is used by `lfilter` to generate the output given the input.
  1141. Parameters
  1142. ----------
  1143. b : array_like
  1144. Linear filter term.
  1145. a : array_like
  1146. Linear filter term.
  1147. y : array_like
  1148. Initial conditions.
  1149. If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
  1150. If `y` is too short, it is padded with zeros.
  1151. x : array_like, optional
  1152. Initial conditions.
  1153. If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
  1154. If `x` is not given, its initial conditions are assumed zero.
  1155. If `x` is too short, it is padded with zeros.
  1156. Returns
  1157. -------
  1158. zi : ndarray
  1159. The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
  1160. where ``K = max(M, N)``.
  1161. See Also
  1162. --------
  1163. lfilter, lfilter_zi
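Examples
--------
A minimal sketch (filter and data chosen arbitrarily): build initial
conditions from the tail of an already-filtered block so that filtering
the next block continues seamlessly.

>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.butter(2, 0.25)
>>> x = np.random.randn(100)
>>> y_full = signal.lfilter(b, a, x)
>>> y_head = signal.lfilter(b, a, x[:50])
>>> zi = signal.lfiltic(b, a, y_head[::-1], x[:50][::-1])
>>> y_tail, _ = signal.lfilter(b, a, x[50:], zi=zi)
>>> bool(np.allclose(np.concatenate([y_head, y_tail]), y_full))
True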
  1164. """
  1165. N = np.size(a) - 1
  1166. M = np.size(b) - 1
  1167. K = max(M, N)
  1168. y = asarray(y)
  1169. if y.dtype.kind in 'bui':
  1170. # ensure calculations are floating point
  1171. y = y.astype(np.float64)
  1172. zi = zeros(K, y.dtype)
  1173. if x is None:
  1174. x = zeros(M, y.dtype)
  1175. else:
  1176. x = asarray(x)
  1177. L = np.size(x)
  1178. if L < M:
  1179. x = r_[x, zeros(M - L)]
  1180. L = np.size(y)
  1181. if L < N:
  1182. y = r_[y, zeros(N - L)]
  1183. for m in range(M):
  1184. zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
  1185. for m in range(N):
  1186. zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
  1187. return zi
  1188. def deconvolve(signal, divisor):
  1189. """Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
  1190. Returns the quotient and remainder such that
  1191. ``signal = convolve(divisor, quotient) + remainder``
  1192. Parameters
  1193. ----------
  1194. signal : array_like
  1195. Signal data, typically a recorded signal
  1196. divisor : array_like
  1197. Divisor data, typically an impulse response or filter that was
  1198. applied to the original signal
  1199. Returns
  1200. -------
  1201. quotient : ndarray
  1202. Quotient, typically the recovered original signal
  1203. remainder : ndarray
  1204. Remainder
  1205. Examples
  1206. --------
  1207. Deconvolve a signal that's been filtered:
  1208. >>> from scipy import signal
  1209. >>> original = [0, 1, 0, 0, 1, 1, 0, 0]
  1210. >>> impulse_response = [2, 1]
  1211. >>> recorded = signal.convolve(impulse_response, original)
  1212. >>> recorded
  1213. array([0, 2, 1, 0, 2, 3, 1, 0, 0])
  1214. >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
  1215. >>> recovered
  1216. array([ 0., 1., 0., 0., 1., 1., 0., 0.])
  1217. See Also
  1218. --------
  1219. numpy.polydiv : performs polynomial division (same operation, but
  1220. also accepts poly1d objects)
  1221. """
  1222. num = atleast_1d(signal)
  1223. den = atleast_1d(divisor)
  1224. N = len(num)
  1225. D = len(den)
  1226. if D > N:
  1227. quot = []
  1228. rem = num
  1229. else:
  1230. input = zeros(N - D + 1, float)
  1231. input[0] = 1
  1232. quot = lfilter(num, den, input)
  1233. rem = num - convolve(den, quot, mode='full')
  1234. return quot, rem
  1235. def hilbert(x, N=None, axis=-1):
  1236. """
  1237. Compute the analytic signal, using the Hilbert transform.
  1238. The transformation is done along the last axis by default.
  1239. Parameters
  1240. ----------
  1241. x : array_like
  1242. Signal data. Must be real.
  1243. N : int, optional
  1244. Number of Fourier components. Default: ``x.shape[axis]``
  1245. axis : int, optional
  1246. Axis along which to do the transformation. Default: -1.
  1247. Returns
  1248. -------
  1249. xa : ndarray
  1250. Analytic signal of `x`, of each 1-D array along `axis`
  1251. See Also
  1252. --------
  1253. scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
  1254. Notes
  1255. -----
  1256. The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
  1257. .. math:: x_a = F^{-1}(F(x) 2U) = x + i y
  1258. where `F` is the Fourier transform, `U` the unit step function,
  1259. and `y` the Hilbert transform of `x`. [1]_
  1260. In other words, the negative half of the frequency spectrum is zeroed
  1261. out, turning the real-valued signal into a complex signal. The Hilbert
  1262. transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
  1263. original signal from ``np.real(hilbert(x))``.
  1264. Examples
--------
  1266. In this example we use the Hilbert transform to determine the amplitude
  1267. envelope and instantaneous frequency of an amplitude-modulated signal.
  1268. >>> import numpy as np
  1269. >>> import matplotlib.pyplot as plt
  1270. >>> from scipy.signal import hilbert, chirp
  1271. >>> duration = 1.0
  1272. >>> fs = 400.0
  1273. >>> samples = int(fs*duration)
  1274. >>> t = np.arange(samples) / fs
  1275. We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
  1276. apply an amplitude modulation.
  1277. >>> signal = chirp(t, 20.0, t[-1], 100.0)
  1278. >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
  1279. The amplitude envelope is given by magnitude of the analytic signal. The
  1280. instantaneous frequency can be obtained by differentiating the
  1281. instantaneous phase in respect to time. The instantaneous phase corresponds
  1282. to the phase angle of the analytic signal.
  1283. >>> analytic_signal = hilbert(signal)
  1284. >>> amplitude_envelope = np.abs(analytic_signal)
  1285. >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
  1286. >>> instantaneous_frequency = (np.diff(instantaneous_phase) /
  1287. ... (2.0*np.pi) * fs)
  1288. >>> fig = plt.figure()
  1289. >>> ax0 = fig.add_subplot(211)
  1290. >>> ax0.plot(t, signal, label='signal')
  1291. >>> ax0.plot(t, amplitude_envelope, label='envelope')
  1292. >>> ax0.set_xlabel("time in seconds")
  1293. >>> ax0.legend()
  1294. >>> ax1 = fig.add_subplot(212)
  1295. >>> ax1.plot(t[1:], instantaneous_frequency)
  1296. >>> ax1.set_xlabel("time in seconds")
  1297. >>> ax1.set_ylim(0.0, 120.0)
  1298. References
  1299. ----------
  1300. .. [1] Wikipedia, "Analytic signal".
  1301. https://en.wikipedia.org/wiki/Analytic_signal
  1302. .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
  1303. .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
  1304. Processing, Third Edition, 2009. Chapter 12.
  1305. ISBN 13: 978-1292-02572-8
  1306. """
  1307. x = asarray(x)
  1308. if iscomplexobj(x):
  1309. raise ValueError("x must be real.")
  1310. if N is None:
  1311. N = x.shape[axis]
  1312. if N <= 0:
  1313. raise ValueError("N must be positive.")
  1314. Xf = fftpack.fft(x, N, axis=axis)
  1315. h = zeros(N)
  1316. if N % 2 == 0:
  1317. h[0] = h[N // 2] = 1
  1318. h[1:N // 2] = 2
  1319. else:
  1320. h[0] = 1
  1321. h[1:(N + 1) // 2] = 2
  1322. if x.ndim > 1:
  1323. ind = [newaxis] * x.ndim
  1324. ind[axis] = slice(None)
  1325. h = h[tuple(ind)]
  1326. x = fftpack.ifft(Xf * h, axis=axis)
  1327. return x
  1328. def hilbert2(x, N=None):
  1329. """
  1330. Compute the '2-D' analytic signal of `x`
  1331. Parameters
  1332. ----------
  1333. x : array_like
  1334. 2-D signal data.
  1335. N : int or tuple of two ints, optional
  1336. Number of Fourier components. Default is ``x.shape``
  1337. Returns
  1338. -------
  1339. xa : ndarray
  1340. Analytic signal of `x` taken along axes (0,1).
  1341. References
  1342. ----------
  1343. .. [1] Wikipedia, "Analytic signal",
  1344. https://en.wikipedia.org/wiki/Analytic_signal
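Examples
--------
A small shape/type sketch (the input values are arbitrary): the result is
complex and, when `N` is not given, has the same shape as the input.

>>> import numpy as np
>>> from scipy.signal import hilbert2
>>> x = np.random.randn(32, 64)
>>> xa = hilbert2(x)
>>> xa.shape, np.iscomplexobj(xa)
((32, 64), True)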
  1345. """
  1346. x = atleast_2d(x)
  1347. if x.ndim > 2:
  1348. raise ValueError("x must be 2-D.")
  1349. if iscomplexobj(x):
  1350. raise ValueError("x must be real.")
  1351. if N is None:
  1352. N = x.shape
  1353. elif isinstance(N, int):
  1354. if N <= 0:
  1355. raise ValueError("N must be positive.")
  1356. N = (N, N)
  1357. elif len(N) != 2 or np.any(np.asarray(N) <= 0):
  1358. raise ValueError("When given as a tuple, N must hold exactly "
  1359. "two positive integers")
  1360. Xf = fftpack.fft2(x, N, axes=(0, 1))
# Build the 1-D spectral multipliers for each of the two axes.
hs = []
for N1 in N:
    h = zeros(N1, 'd')
    if N1 % 2 == 0:
        h[0] = h[N1 // 2] = 1
        h[1:N1 // 2] = 2
    else:
        h[0] = 1
        h[1:(N1 + 1) // 2] = 2
    hs.append(h)
h1, h2 = hs
h = h1[:, newaxis] * h2[newaxis, :]
  1374. k = x.ndim
  1375. while k > 2:
  1376. h = h[:, newaxis]
  1377. k -= 1
  1378. x = fftpack.ifft2(Xf * h, axes=(0, 1))
  1379. return x
  1380. def cmplx_sort(p):
  1381. """Sort roots based on magnitude.
  1382. Parameters
  1383. ----------
  1384. p : array_like
  1385. The roots to sort, as a 1-D array.
  1386. Returns
  1387. -------
  1388. p_sorted : ndarray
  1389. Sorted roots.
  1390. indx : ndarray
  1391. Array of indices needed to sort the input `p`.
  1392. Examples
  1393. --------
  1394. >>> from scipy import signal
  1395. >>> vals = [1, 4, 1+1.j, 3]
  1396. >>> p_sorted, indx = signal.cmplx_sort(vals)
  1397. >>> p_sorted
  1398. array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
  1399. >>> indx
  1400. array([0, 2, 3, 1])
  1401. """
  1402. p = asarray(p)
  1403. if iscomplexobj(p):
  1404. indx = argsort(abs(p))
  1405. else:
  1406. indx = argsort(p)
  1407. return take(p, indx, 0), indx
  1408. def unique_roots(p, tol=1e-3, rtype='min'):
  1409. """Determine unique roots and their multiplicities from a list of roots.
  1410. Parameters
  1411. ----------
  1412. p : array_like
  1413. The list of roots.
  1414. tol : float, optional
  1415. The tolerance for two roots to be considered equal in terms of
  1416. the distance between them. Default is 1e-3. Refer to Notes about
  1417. the details on roots grouping.
  1418. rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional
  1419. How to determine the returned root if multiple roots are within
  1420. `tol` of each other.
  1421. - 'max', 'maximum': pick the maximum of those roots
  1422. - 'min', 'minimum': pick the minimum of those roots
  1423. - 'avg', 'mean': take the average of those roots
  1424. When finding minimum or maximum among complex roots they are compared
  1425. first by the real part and then by the imaginary part.
  1426. Returns
  1427. -------
  1428. unique : ndarray
  1429. The list of unique roots.
  1430. multiplicity : ndarray
  1431. The multiplicity of each root.
  1432. Notes
  1433. -----
  1434. If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to
  1435. ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it
  1436. doesn't necessarily mean that ``a`` is close to ``c``. It means that roots
  1437. grouping is not unique. In this function we use "greedy" grouping going
  1438. through the roots in the order they are given in the input `p`.
  1439. This utility function is not specific to roots but can be used for any
  1440. sequence of values for which uniqueness and multiplicity has to be
  1441. determined. For a more general routine, see `numpy.unique`.
  1442. Examples
  1443. --------
  1444. >>> from scipy import signal
  1445. >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
  1446. >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
  1447. Check which roots have multiplicity larger than 1:
  1448. >>> uniq[mult > 1]
  1449. array([ 1.305])
  1450. """
  1451. if rtype in ['max', 'maximum']:
  1452. reduce = np.max
  1453. elif rtype in ['min', 'minimum']:
  1454. reduce = np.min
  1455. elif rtype in ['avg', 'mean']:
  1456. reduce = np.mean
  1457. else:
  1458. raise ValueError("`rtype` must be one of "
  1459. "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
  1460. p = np.asarray(p)
  1461. points = np.empty((len(p), 2))
  1462. points[:, 0] = np.real(p)
  1463. points[:, 1] = np.imag(p)
  1464. tree = cKDTree(points)
  1465. p_unique = []
  1466. p_multiplicity = []
  1467. used = np.zeros(len(p), dtype=bool)
  1468. for i in range(len(p)):
  1469. if used[i]:
  1470. continue
  1471. group = tree.query_ball_point(points[i], tol)
  1472. group = [x for x in group if not used[x]]
  1473. p_unique.append(reduce(p[group]))
  1474. p_multiplicity.append(len(group))
  1475. used[group] = True
  1476. return np.asarray(p_unique), np.asarray(p_multiplicity)
  1477. def invres(r, p, k, tol=1e-3, rtype='avg'):
  1478. """
  1479. Compute b(s) and a(s) from partial fraction expansion.
  1480. If `M` is the degree of numerator `b` and `N` the degree of denominator
  1481. `a`::

          b(s)     b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
  H(s) = ------ = ------------------------------------------
          a(s)     a[0] s**(N) + a[1] s**(N-1) + ... + a[N]

then the partial-fraction expansion H(s) is defined as::

        r[0]       r[1]             r[-1]
    = -------- + -------- + ... + --------- + k(s)
      (s-p[0])   (s-p[1])         (s-p[-1])

If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::

      r[i]      r[i+1]              r[i+n-1]
    -------- + ----------- + ... + -----------
    (s-p[i])   (s-p[i])**2         (s-p[i])**n

  1494. This function is used for polynomials in positive powers of s or z,
  1495. such as analog filters or digital filters in controls engineering. For
  1496. negative powers of z (typical for digital filters in DSP), use `invresz`.
  1497. Parameters
  1498. ----------
  1499. r : array_like
  1500. Residues.
  1501. p : array_like
  1502. Poles.
  1503. k : array_like
  1504. Coefficients of the direct polynomial term.
  1505. tol : float, optional
  1506. The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
  1508. How to determine the returned root if multiple roots are within
  1509. `tol` of each other.
  1510. - 'max': pick the maximum of those roots.
  1511. - 'min': pick the minimum of those roots.
  1512. - 'avg': take the average of those roots.
  1513. Returns
  1514. -------
  1515. b : ndarray
  1516. Numerator polynomial coefficients.
  1517. a : ndarray
  1518. Denominator polynomial coefficients.
  1519. See Also
  1520. --------
  1521. residue, invresz, unique_roots
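Examples
--------
A round-trip sketch (coefficients chosen arbitrarily): expand a rational
function with `residue` and rebuild it with `invres`.

>>> import numpy as np
>>> from scipy import signal
>>> r, p, k = signal.residue([1.0, 0.0], [1.0, 3.0, 2.0])
>>> b, a = signal.invres(r, p, k)
>>> bool(np.allclose(b, [1.0, 0.0])) and bool(np.allclose(a, [1.0, 3.0, 2.0]))
True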
  1522. """
  1523. extra = k
  1524. p, indx = cmplx_sort(p)
  1525. r = take(r, indx, 0)
  1526. pout, mult = unique_roots(p, tol=tol, rtype=rtype)
  1527. p = []
  1528. for k in range(len(pout)):
  1529. p.extend([pout[k]] * mult[k])
  1530. a = atleast_1d(poly(p))
  1531. if len(extra) > 0:
  1532. b = polymul(extra, a)
  1533. else:
  1534. b = [0]
  1535. indx = 0
  1536. for k in range(len(pout)):
  1537. temp = []
  1538. for l in range(len(pout)):
  1539. if l != k:
  1540. temp.extend([pout[l]] * mult[l])
  1541. for m in range(mult[k]):
  1542. t2 = temp[:]
  1543. t2.extend([pout[k]] * (mult[k] - m - 1))
  1544. b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
  1545. indx += 1
  1546. b = real_if_close(b)
  1547. while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
  1548. b = b[1:]
  1549. return b, a
  1550. def residue(b, a, tol=1e-3, rtype='avg'):
  1551. """
  1552. Compute partial-fraction expansion of b(s) / a(s).
  1553. If `M` is the degree of numerator `b` and `N` the degree of denominator
  1554. `a`::

          b(s)     b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
  H(s) = ------ = ------------------------------------------
          a(s)     a[0] s**(N) + a[1] s**(N-1) + ... + a[N]

then the partial-fraction expansion H(s) is defined as::

        r[0]       r[1]             r[-1]
    = -------- + -------- + ... + --------- + k(s)
      (s-p[0])   (s-p[1])         (s-p[-1])

If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::

      r[i]      r[i+1]              r[i+n-1]
    -------- + ----------- + ... + -----------
    (s-p[i])   (s-p[i])**2         (s-p[i])**n

  1567. This function is used for polynomials in positive powers of s or z,
  1568. such as analog filters or digital filters in controls engineering. For
  1569. negative powers of z (typical for digital filters in DSP), use `residuez`.
  1570. Parameters
  1571. ----------
  1572. b : array_like
  1573. Numerator polynomial coefficients.
  1574. a : array_like
  1575. Denominator polynomial coefficients.
  1576. Returns
  1577. -------
  1578. r : ndarray
  1579. Residues.
  1580. p : ndarray
  1581. Poles.
  1582. k : ndarray
  1583. Coefficients of the direct polynomial term.
  1584. See Also
  1585. --------
  1586. invres, residuez, numpy.poly, unique_roots
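Examples
--------
A small numerical sketch (coefficients and the test point are arbitrary;
there are no repeated poles here): the partial fractions evaluated at a
point reproduce ``b(s)/a(s)``.

>>> import numpy as np
>>> from scipy import signal
>>> b, a = [1.0, 0.0], [1.0, 3.0, 2.0]
>>> r, p, k = signal.residue(b, a)
>>> s = 1.5j
>>> lhs = np.polyval(b, s) / np.polyval(a, s)
>>> rhs = np.sum(r / (s - p)) + np.polyval(k, s)
>>> bool(np.allclose(lhs, rhs))
True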
  1587. """
  1588. b, a = map(asarray, (b, a))
  1589. rscale = a[0]
  1590. k, b = polydiv(b, a)
  1591. p = roots(a)
  1592. r = p * 0.0
  1593. pout, mult = unique_roots(p, tol=tol, rtype=rtype)
  1594. p = []
  1595. for n in range(len(pout)):
  1596. p.extend([pout[n]] * mult[n])
  1597. p = asarray(p)
  1598. # Compute the residue from the general formula
  1599. indx = 0
  1600. for n in range(len(pout)):
  1601. bn = b.copy()
  1602. pn = []
  1603. for l in range(len(pout)):
  1604. if l != n:
  1605. pn.extend([pout[l]] * mult[l])
  1606. an = atleast_1d(poly(pn))
  1607. # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
  1608. # multiplicity of pole at po[n]
  1609. sig = mult[n]
  1610. for m in range(sig, 0, -1):
  1611. if sig > m:
  1612. # compute next derivative of bn(s) / an(s)
  1613. term1 = polymul(polyder(bn, 1), an)
  1614. term2 = polymul(bn, polyder(an, 1))
  1615. bn = polysub(term1, term2)
  1616. an = polymul(an, an)
  1617. r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
  1618. factorial(sig - m))
  1619. indx += sig
  1620. return r / rscale, p, k
  1621. def residuez(b, a, tol=1e-3, rtype='avg'):
  1622. """
  1623. Compute partial-fraction expansion of b(z) / a(z).
  1624. If `M` is the degree of numerator `b` and `N` the degree of denominator
  1625. `a`::

          b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
  H(z) = ------ = ------------------------------------------
          a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)

then the partial-fraction expansion H(z) is defined as::

             r[0]                   r[-1]
    = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
      (1-p[0]z**(-1))         (1-p[-1]z**(-1))

If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::

         r[i]               r[i+1]                    r[i+n-1]
    --------------- + ------------------ + ... + ------------------
    (1-p[i]z**(-1))   (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

  1638. This function is used for polynomials in negative powers of z,
  1639. such as digital filters in DSP. For positive powers, use `residue`.
  1640. Parameters
  1641. ----------
  1642. b : array_like
  1643. Numerator polynomial coefficients.
  1644. a : array_like
  1645. Denominator polynomial coefficients.
  1646. Returns
  1647. -------
  1648. r : ndarray
  1649. Residues.
  1650. p : ndarray
  1651. Poles.
  1652. k : ndarray
  1653. Coefficients of the direct polynomial term.
  1654. See Also
  1655. --------
  1656. invresz, residue, unique_roots
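Examples
--------
A small numerical sketch (coefficients and the test value of ``z**-1`` are
arbitrary; there are no repeated poles here): the partial fractions
reproduce ``b(z)/a(z)``.

>>> import numpy as np
>>> from scipy import signal
>>> b, a = [1.0, -0.2], [1.0, -1.1, 0.3]
>>> r, p, k = signal.residuez(b, a)
>>> zinv = 0.25 + 0.4j
>>> lhs = np.polyval(b[::-1], zinv) / np.polyval(a[::-1], zinv)
>>> rhs = np.sum(r / (1 - p * zinv)) + np.polyval(np.atleast_1d(k)[::-1], zinv)
>>> bool(np.allclose(lhs, rhs))
True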
  1657. """
  1658. b, a = map(asarray, (b, a))
  1659. gain = a[0]
  1660. brev, arev = b[::-1], a[::-1]
  1661. krev, brev = polydiv(brev, arev)
  1662. if krev == []:
  1663. k = []
  1664. else:
  1665. k = krev[::-1]
  1666. b = brev[::-1]
  1667. p = roots(a)
  1668. r = p * 0.0
  1669. pout, mult = unique_roots(p, tol=tol, rtype=rtype)
  1670. p = []
  1671. for n in range(len(pout)):
  1672. p.extend([pout[n]] * mult[n])
  1673. p = asarray(p)
  1674. # Compute the residue from the general formula (for discrete-time)
  1675. # the polynomial is in z**(-1) and the multiplication is by terms
  1676. # like this (1-p[i] z**(-1))**mult[i]. After differentiation,
  1677. # we must divide by (-p[i])**(m-k) as well as (m-k)!
  1678. indx = 0
  1679. for n in range(len(pout)):
  1680. bn = brev.copy()
  1681. pn = []
  1682. for l in range(len(pout)):
  1683. if l != n:
  1684. pn.extend([pout[l]] * mult[l])
  1685. an = atleast_1d(poly(pn))[::-1]
  1686. # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
  1687. # multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
  1688. sig = mult[n]
  1689. for m in range(sig, 0, -1):
  1690. if sig > m:
  1691. # compute next derivative of bn(s) / an(s)
  1692. term1 = polymul(polyder(bn, 1), an)
  1693. term2 = polymul(bn, polyder(an, 1))
  1694. bn = polysub(term1, term2)
  1695. an = polymul(an, an)
  1696. r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
  1697. polyval(an, 1.0 / pout[n]) /
  1698. factorial(sig - m) / (-pout[n]) ** (sig - m))
  1699. indx += sig
  1700. return r / gain, p, k
  1701. def invresz(r, p, k, tol=1e-3, rtype='avg'):
  1702. """
  1703. Compute b(z) and a(z) from partial fraction expansion.
  1704. If `M` is the degree of numerator `b` and `N` the degree of denominator
  1705. `a`::

          b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
  H(z) = ------ = ------------------------------------------
          a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)

then the partial-fraction expansion H(z) is defined as::

             r[0]                   r[-1]
    = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
      (1-p[0]z**(-1))         (1-p[-1]z**(-1))

If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::

         r[i]               r[i+1]                    r[i+n-1]
    --------------- + ------------------ + ... + ------------------
    (1-p[i]z**(-1))   (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

  1718. This function is used for polynomials in negative powers of z,
  1719. such as digital filters in DSP. For positive powers, use `invres`.
  1720. Parameters
  1721. ----------
  1722. r : array_like
  1723. Residues.
  1724. p : array_like
  1725. Poles.
  1726. k : array_like
  1727. Coefficients of the direct polynomial term.
  1728. tol : float, optional
  1729. The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
  1731. How to determine the returned root if multiple roots are within
  1732. `tol` of each other.
  1733. - 'max': pick the maximum of those roots.
  1734. - 'min': pick the minimum of those roots.
  1735. - 'avg': take the average of those roots.
  1736. Returns
  1737. -------
  1738. b : ndarray
  1739. Numerator polynomial coefficients.
  1740. a : ndarray
  1741. Denominator polynomial coefficients.
  1742. See Also
  1743. --------
  1744. residuez, unique_roots, invres
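Examples
--------
A minimal sketch (an expansion written down by hand): two simple poles at
``+/-0.5`` with unit residues and no direct term combine to
``2 / (1 - 0.25*z**-2)``.

>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.invresz([1.0, 1.0], [0.5, -0.5], [])
>>> bool(np.allclose(b, [2.0, 0.0])) and bool(np.allclose(a, [1.0, 0.0, -0.25]))
True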
  1745. """
  1746. extra = asarray(k)
  1747. p, indx = cmplx_sort(p)
  1748. r = take(r, indx, 0)
  1749. pout, mult = unique_roots(p, tol=tol, rtype=rtype)
  1750. p = []
  1751. for k in range(len(pout)):
  1752. p.extend([pout[k]] * mult[k])
  1753. a = atleast_1d(poly(p))
  1754. if len(extra) > 0:
  1755. b = polymul(extra, a)
  1756. else:
  1757. b = [0]
  1758. indx = 0
  1759. brev = asarray(b)[::-1]
  1760. for k in range(len(pout)):
  1761. temp = []
  1762. # Construct polynomial which does not include any of this root
  1763. for l in range(len(pout)):
  1764. if l != k:
  1765. temp.extend([pout[l]] * mult[l])
  1766. for m in range(mult[k]):
  1767. t2 = temp[:]
  1768. t2.extend([pout[k]] * (mult[k] - m - 1))
  1769. brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
  1770. indx += 1
  1771. b = real_if_close(brev[::-1])
  1772. return b, a
  1773. def resample(x, num, t=None, axis=0, window=None):
  1774. """
  1775. Resample `x` to `num` samples using Fourier method along the given axis.
  1776. The resampled signal starts at the same value as `x` but is sampled
  1777. with a spacing of ``len(x) / num * (spacing of x)``. Because a
  1778. Fourier method is used, the signal is assumed to be periodic.
  1779. Parameters
  1780. ----------
  1781. x : array_like
  1782. The data to be resampled.
  1783. num : int
  1784. The number of samples in the resampled signal.
  1785. t : array_like, optional
  1786. If `t` is given, it is assumed to be the sample positions
  1787. associated with the signal data in `x`.
  1788. axis : int, optional
  1789. The axis of `x` that is resampled. Default is 0.
  1790. window : array_like, callable, string, float, or tuple, optional
  1791. Specifies the window applied to the signal in the Fourier
  1792. domain. See below for details.
  1793. Returns
  1794. -------
  1795. resampled_x or (resampled_x, resampled_t)
  1796. Either the resampled array, or, if `t` was given, a tuple
  1797. containing the resampled array and the corresponding resampled
  1798. positions.
  1799. See Also
  1800. --------
  1801. decimate : Downsample the signal after applying an FIR or IIR filter.
  1802. resample_poly : Resample using polyphase filtering and an FIR filter.
  1803. Notes
  1804. -----
  1805. The argument `window` controls a Fourier-domain window that tapers
  1806. the Fourier spectrum before zero-padding to alleviate ringing in
  1807. the resampled values for sampled signals you didn't intend to be
  1808. interpreted as band-limited.
  1809. If `window` is a function, then it is called with a vector of inputs
  1810. indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
  1811. If `window` is an array of the same length as `x.shape[axis]` it is
  1812. assumed to be the window to be applied directly in the Fourier
  1813. domain (with dc and low-frequency first).
  1814. For any other type of `window`, the function `scipy.signal.get_window`
  1815. is called to generate the window.
  1816. The first sample of the returned vector is the same as the first
  1817. sample of the input vector. The spacing between samples is changed
  1818. from ``dx`` to ``dx * len(x) / num``.
  1819. If `t` is not None, then it represents the old sample positions,
  1820. and the new sample positions will be returned as well as the new
  1821. samples.
  1822. As noted, `resample` uses FFT transformations, which can be very
  1823. slow if the number of input or output samples is large and prime;
  1824. see `scipy.fftpack.fft`.
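As a brief illustration of the `window` argument (the Kaiser beta of 4.0
here is arbitrary), a Fourier-domain taper can be requested by name or
tuple; the choice of window does not change the output length:

>>> import numpy as np
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_plain = signal.resample(y, 100)
>>> f_taper = signal.resample(y, 100, window=('kaiser', 4.0))
>>> f_plain.shape == f_taper.shape == (100,)
True
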
  1825. Examples
  1826. --------
  1827. Note that the end of the resampled data rises to meet the first
  1828. sample of the next cycle:
  1829. >>> from scipy import signal
  1830. >>> x = np.linspace(0, 10, 20, endpoint=False)
  1831. >>> y = np.cos(-x**2/6.0)
  1832. >>> f = signal.resample(y, 100)
  1833. >>> xnew = np.linspace(0, 10, 100, endpoint=False)
  1834. >>> import matplotlib.pyplot as plt
  1835. >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
  1836. >>> plt.legend(['data', 'resampled'], loc='best')
  1837. >>> plt.show()
  1838. """
  1839. x = asarray(x)
  1840. X = fftpack.fft(x, axis=axis)
  1841. Nx = x.shape[axis]
  1842. if window is not None:
  1843. if callable(window):
  1844. W = window(fftpack.fftfreq(Nx))
  1845. elif isinstance(window, ndarray):
  1846. if window.shape != (Nx,):
  1847. raise ValueError('window must have the same length as data')
  1848. W = window
  1849. else:
  1850. W = fftpack.ifftshift(get_window(window, Nx))
  1851. newshape = [1] * x.ndim
  1852. newshape[axis] = len(W)
  1853. W.shape = newshape
  1854. X = X * W
  1855. W.shape = (Nx,)
  1856. sl = [slice(None)] * x.ndim
  1857. newshape = list(x.shape)
  1858. newshape[axis] = num
  1859. N = int(np.minimum(num, Nx))
  1860. Y = zeros(newshape, 'D')
  1861. sl[axis] = slice(0, (N + 1) // 2)
  1862. Y[tuple(sl)] = X[tuple(sl)]
  1863. sl[axis] = slice(-(N - 1) // 2, None)
  1864. Y[tuple(sl)] = X[tuple(sl)]
if N % 2 == 0:  # special treatment when the smaller number of points N is even
    # So far we have set Y[-N/2] = X[-N/2].
    if N < Nx:  # downsampling
        sl[axis] = slice(N // 2, N // 2 + 1, None)  # select the component at frequency +N/2
        Y[tuple(sl)] += X[tuple(sl)]  # add the component of X at +N/2
    elif N < num:  # upsampling
        sl[axis] = slice(num - N // 2, num - N // 2 + 1, None)  # select the component at frequency -N/2
        Y[tuple(sl)] /= 2  # halve the component at -N/2
        temp = Y[tuple(sl)]
        sl[axis] = slice(N // 2, N // 2 + 1, None)  # select the component at +N/2
        Y[tuple(sl)] = temp  # set that equal to the (halved) component at -N/2
  1875. y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
  1876. if x.dtype.char not in ['F', 'D']:
  1877. y = y.real
  1878. if t is None:
  1879. return y
  1880. else:
  1881. new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
  1882. return y, new_t
  1883. def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
  1884. """
  1885. Resample `x` along the given axis using polyphase filtering.
  1886. The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
  1887. FIR filter is applied, and then it is downsampled by the factor `down`.
  1888. The resulting sample rate is ``up / down`` times the original sample
  1889. rate. Values beyond the boundary of the signal are assumed to be zero
  1890. during the filtering step.
  1891. Parameters
  1892. ----------
  1893. x : array_like
  1894. The data to be resampled.
  1895. up : int
  1896. The upsampling factor.
  1897. down : int
  1898. The downsampling factor.
  1899. axis : int, optional
  1900. The axis of `x` that is resampled. Default is 0.
  1901. window : string, tuple, or array_like, optional
  1902. Desired window to use to design the low-pass filter, or the FIR filter
  1903. coefficients to employ. See below for details.
  1904. Returns
  1905. -------
  1906. resampled_x : array
  1907. The resampled array.
  1908. See Also
  1909. --------
  1910. decimate : Downsample the signal after applying an FIR or IIR filter.
  1911. resample : Resample up or down using the FFT method.
  1912. Notes
  1913. -----
  1914. This polyphase method will likely be faster than the Fourier method
  1915. in `scipy.signal.resample` when the number of samples is large and
  1916. prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
  1918. filter used will depend on ``max(up, down) // gcd(up, down)``, and
  1919. the number of operations during polyphase filtering will depend on
  1920. the filter length and `down` (see `scipy.signal.upfirdn` for details).
  1921. The argument `window` specifies the FIR low-pass filter design.
  1922. If `window` is an array_like it is assumed to be the FIR filter
  1923. coefficients. Note that the FIR filter is applied after the upsampling
  1924. step, so it should be designed to operate on a signal at a sampling
  1925. frequency higher than the original by a factor of `up//gcd(up, down)`.
  1926. This function's output will be centered with respect to this array, so it
  1927. is best to pass a symmetric filter with an odd number of samples if, as
  1928. is usually the case, a zero-phase filter is desired.
  1929. For any other type of `window`, the functions `scipy.signal.get_window`
  1930. and `scipy.signal.firwin` are called to generate the appropriate filter
  1931. coefficients.
  1932. The first sample of the returned vector is the same as the first
  1933. sample of the input vector. The spacing between samples is changed
  1934. from ``dx`` to ``dx * down / float(up)``.
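For reference, after `up` and `down` are reduced by their greatest common
divisor, the output has ``ceil(x.shape[axis] * up / down)`` samples along
`axis` (a small sketch with arbitrary data):

>>> import numpy as np
>>> from scipy import signal
>>> signal.resample_poly(np.arange(30.0), 3, 2).shape
(45,)
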
  1935. Examples
  1936. --------
  1937. Note that the end of the resampled data rises to meet the first
  1938. sample of the next cycle for the FFT method, and gets closer to zero
  1939. for the polyphase method:
  1940. >>> from scipy import signal
  1941. >>> x = np.linspace(0, 10, 20, endpoint=False)
  1942. >>> y = np.cos(-x**2/6.0)
  1943. >>> f_fft = signal.resample(y, 100)
  1944. >>> f_poly = signal.resample_poly(y, 100, 20)
  1945. >>> xnew = np.linspace(0, 10, 100, endpoint=False)
  1946. >>> import matplotlib.pyplot as plt
  1947. >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
  1948. >>> plt.plot(x, y, 'ko-')
  1949. >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
  1950. >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
  1951. >>> plt.show()
  1952. """
  1953. x = asarray(x)
  1954. if up != int(up):
  1955. raise ValueError("up must be an integer")
  1956. if down != int(down):
  1957. raise ValueError("down must be an integer")
  1958. up = int(up)
  1959. down = int(down)
  1960. if up < 1 or down < 1:
  1961. raise ValueError('up and down must be >= 1')
  1962. # Determine our up and down factors
  1963. # Use a rational approximation to save computation time on really long
  1964. # signals
  1965. g_ = gcd(up, down)
  1966. up //= g_
  1967. down //= g_
  1968. if up == down == 1:
  1969. return x.copy()
  1970. n_out = x.shape[axis] * up
  1971. n_out = n_out // down + bool(n_out % down)
  1972. if isinstance(window, (list, np.ndarray)):
  1973. window = array(window) # use array to force a copy (we modify it)
  1974. if window.ndim > 1:
  1975. raise ValueError('window must be 1-D')
  1976. half_len = (window.size - 1) // 2
  1977. h = window
  1978. else:
  1979. # Design a linear-phase low-pass FIR filter
  1980. max_rate = max(up, down)
  1981. f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
  1982. half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
  1983. h = firwin(2 * half_len + 1, f_c, window=window)
  1984. h *= up
  1985. # Zero-pad our filter to put the output samples at the center
  1986. n_pre_pad = (down - half_len % down)
  1987. n_post_pad = 0
  1988. n_pre_remove = (half_len + n_pre_pad) // down
  1989. # We should rarely need to do this given our filter lengths...
  1990. while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
  1991. up, down) < n_out + n_pre_remove:
  1992. n_post_pad += 1
  1993. h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,
  1994. np.zeros(n_post_pad, dtype=h.dtype)))
  1995. n_pre_remove_end = n_pre_remove + n_out
  1996. # filter then remove excess
  1997. y = upfirdn(h, x, up, down, axis=axis)
  1998. keep = [slice(None), ]*x.ndim
  1999. keep[axis] = slice(n_pre_remove, n_pre_remove_end)
  2000. return y[tuple(keep)]
  2001. def vectorstrength(events, period):
  2002. '''
  2003. Determine the vector strength of the events corresponding to the given
  2004. period.
  2005. The vector strength is a measure of phase synchrony, how well the
  2006. timing of the events is synchronized to a single period of a periodic
  2007. signal.
  2008. If multiple periods are used, calculate the vector strength of each.
  2009. This is called the "resonating vector strength".
  2010. Parameters
  2011. ----------
  2012. events : 1D array_like
  2013. An array of time points containing the timing of the events.
  2014. period : float or array_like
  2015. The period of the signal that the events should synchronize to.
  2016. The period is in the same units as `events`. It can also be an array
  2017. of periods, in which case the outputs are arrays of the same length.
  2018. Returns
  2019. -------
  2020. strength : float or 1D array
  2021. The strength of the synchronization. 1.0 is perfect synchronization
  2022. and 0.0 is no synchronization. If `period` is an array, this is also
  2023. an array with each element containing the vector strength at the
  2024. corresponding period.
  2025. phase : float or array
  2026. The phase that the events are most strongly synchronized to in radians.
  2027. If `period` is an array, this is also an array with each element
  2028. containing the phase for the corresponding period.
  2029. References
  2030. ----------
  2031. van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
  2032. strength: Auditory system, electric fish, and noise.
  2033. Chaos 21, 047508 (2011);
  2034. :doi:`10.1063/1.3670512`.
  2035. van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
  2036. biological and mathematical perspectives. Biol Cybern.
  2037. 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
  2038. van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
  2039. when we vary the "probing" frequency while keeping the spike times
  2040. fixed. Biol Cybern. 2013 Aug;107(4):491-94.
  2041. :doi:`10.1007/s00422-013-0560-8`.
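Examples
--------
A minimal sketch: events that occur at the same phase of every cycle are
perfectly synchronized, so the returned strength is (numerically) 1 and
the preferred phase is 0.

>>> import numpy as np
>>> from scipy.signal import vectorstrength
>>> events = np.arange(10.0)          # one event per period
>>> strength, phase = vectorstrength(events, 1.0)
>>> bool(np.allclose([strength, phase], [1.0, 0.0]))
True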
  2042. '''
  2043. events = asarray(events)
  2044. period = asarray(period)
  2045. if events.ndim > 1:
  2046. raise ValueError('events cannot have dimensions more than 1')
  2047. if period.ndim > 1:
  2048. raise ValueError('period cannot have dimensions more than 1')
  2049. # we need to know later if period was originally a scalar
  2050. scalarperiod = not period.ndim
  2051. events = atleast_2d(events)
  2052. period = atleast_2d(period)
  2053. if (period <= 0).any():
  2054. raise ValueError('periods must be positive')
  2055. # this converts the times to vectors
  2056. vectors = exp(dot(2j*pi/period.T, events))
  2057. # the vector strength is just the magnitude of the mean of the vectors
  2058. # the vector phase is the angle of the mean of the vectors
  2059. vectormean = mean(vectors, axis=1)
  2060. strength = abs(vectormean)
  2061. phase = angle(vectormean)
  2062. # if the original period was a scalar, return scalars
  2063. if scalarperiod:
  2064. strength = strength[0]
  2065. phase = phase[0]
  2066. return strength, phase
  2067. def detrend(data, axis=-1, type='linear', bp=0):
  2068. """
  2069. Remove linear trend along axis from data.
  2070. Parameters
  2071. ----------
  2072. data : array_like
  2073. The input data.
  2074. axis : int, optional
  2075. The axis along which to detrend the data. By default this is the
  2076. last axis (-1).
  2077. type : {'linear', 'constant'}, optional
  2078. The type of detrending. If ``type == 'linear'`` (default),
  2079. the result of a linear least-squares fit to `data` is subtracted
  2080. from `data`.
  2081. If ``type == 'constant'``, only the mean of `data` is subtracted.
  2082. bp : array_like of ints, optional
  2083. A sequence of break points. If given, an individual linear fit is
  2084. performed for each part of `data` between two break points.
  2085. Break points are specified as indices into `data`.
  2086. Returns
  2087. -------
  2088. ret : ndarray
  2089. The detrended input data.
  2090. Examples
  2091. --------
  2092. >>> from scipy import signal
  2093. >>> randgen = np.random.RandomState(9)
  2094. >>> npoints = 1000
  2095. >>> noise = randgen.randn(npoints)
  2096. >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
  2097. >>> (signal.detrend(x) - noise).max() < 0.01
  2098. True
  2099. """
  2100. if type not in ['linear', 'l', 'constant', 'c']:
  2101. raise ValueError("Trend type must be 'linear' or 'constant'.")
  2102. data = asarray(data)
  2103. dtype = data.dtype.char
  2104. if dtype not in 'dfDF':
  2105. dtype = 'd'
  2106. if type in ['constant', 'c']:
  2107. ret = data - expand_dims(mean(data, axis), axis)
  2108. return ret
  2109. else:
  2110. dshape = data.shape
  2111. N = dshape[axis]
  2112. bp = sort(unique(r_[0, bp, N]))
  2113. if np.any(bp > N):
  2114. raise ValueError("Breakpoints must be less than length "
  2115. "of data along given axis.")
  2116. Nreg = len(bp) - 1
  2117. # Restructure data so that axis is along first dimension and
  2118. # all other dimensions are collapsed into second dimension
  2119. rnk = len(dshape)
  2120. if axis < 0:
  2121. axis = axis + rnk
  2122. newdims = r_[axis, 0:axis, axis + 1:rnk]
  2123. newdata = reshape(transpose(data, tuple(newdims)),
  2124. (N, _prod(dshape) // N))
  2125. newdata = newdata.copy() # make sure we have a copy
  2126. if newdata.dtype.char not in 'dfDF':
  2127. newdata = newdata.astype(dtype)
  2128. # Find leastsq fit and remove it for each piece
  2129. for m in range(Nreg):
  2130. Npts = bp[m + 1] - bp[m]
  2131. A = ones((Npts, 2), dtype)
  2132. A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
  2133. sl = slice(bp[m], bp[m + 1])
  2134. coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
  2135. newdata[sl] = newdata[sl] - dot(A, coef)
  2136. # Put data back in original shape.
  2137. tdshape = take(dshape, newdims, 0)
  2138. ret = reshape(newdata, tuple(tdshape))
  2139. vals = list(range(1, rnk))
  2140. olddims = vals[:axis] + [0] + vals[axis:]
  2141. ret = transpose(ret, tuple(olddims))
  2142. return ret
  2143. def lfilter_zi(b, a):
  2144. """
  2145. Construct initial conditions for lfilter for step response steady-state.
  2146. Compute an initial state `zi` for the `lfilter` function that corresponds
  2147. to the steady state of the step response.
  2148. A typical use of this function is to set the initial state so that the
  2149. output of the filter starts at the same value as the first element of
  2150. the signal to be filtered.
  2151. Parameters
  2152. ----------
  2153. b, a : array_like (1-D)
  2154. The IIR filter coefficients. See `lfilter` for more
  2155. information.
  2156. Returns
  2157. -------
  2158. zi : 1-D ndarray
  2159. The initial state for the filter.
  2160. See Also
  2161. --------
  2162. lfilter, lfiltic, filtfilt
  2163. Notes
  2164. -----
  2165. A linear filter with order m has a state space representation (A, B, C, D),
  2166. for which the output y of the filter can be expressed as::
  2167. z(n+1) = A*z(n) + B*x(n)
  2168. y(n) = C*z(n) + D*x(n)
  2169. where z(n) is a vector of length m, A has shape (m, m), B has shape
  2170. (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
  2171. a scalar). lfilter_zi solves::
  2172. zi = A*zi + B
  2173. In other words, it finds the initial condition for which the response
  2174. to an input of all ones is a constant.
  2175. Given the filter coefficients `a` and `b`, the state space matrices
  2176. for the transposed direct form II implementation of the linear filter,
  2177. which is the implementation used by scipy.signal.lfilter, are::
  2178. A = scipy.linalg.companion(a).T
  2179. B = b[1:] - a[1:]*b[0]
  2180. assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
  2181. divided by a[0].
  2182. Examples
  2183. --------
  2184. The following code creates a lowpass Butterworth filter. Then it
  2185. applies that filter to an array whose values are all 1.0; the
  2186. output is also all 1.0, as expected for a lowpass filter. If the
  2187. `zi` argument of `lfilter` had not been given, the output would have
  2188. shown the transient signal.
  2189. >>> from numpy import array, ones
  2190. >>> from scipy.signal import lfilter, lfilter_zi, butter
  2191. >>> b, a = butter(5, 0.25)
  2192. >>> zi = lfilter_zi(b, a)
  2193. >>> y, zo = lfilter(b, a, ones(10), zi=zi)
  2194. >>> y
  2195. array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
  2196. Another example:
  2197. >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
  2198. >>> y, zf = lfilter(b, a, x, zi=zi*x[0])
  2199. >>> y
  2200. array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
  2201. 0.44399389, 0.35505241])
  2202. Note that the `zi` argument to `lfilter` was computed using
  2203. `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
  2204. transient until the input drops from 0.5 to 0.0.
  2205. """
  2206. # FIXME: Can this function be replaced with an appropriate
  2207. # use of lfiltic? For example, when b,a = butter(N,Wn),
  2208. # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
  2209. #
  2210. # We could use scipy.signal.normalize, but it uses warnings in
  2211. # cases where a ValueError is more appropriate, and it allows
  2212. # b to be 2D.
  2213. b = np.atleast_1d(b)
  2214. if b.ndim != 1:
  2215. raise ValueError("Numerator b must be 1-D.")
  2216. a = np.atleast_1d(a)
  2217. if a.ndim != 1:
  2218. raise ValueError("Denominator a must be 1-D.")
  2219. while len(a) > 1 and a[0] == 0.0:
  2220. a = a[1:]
  2221. if a.size < 1:
  2222. raise ValueError("There must be at least one nonzero `a` coefficient.")
  2223. if a[0] != 1.0:
  2224. # Normalize the coefficients so a[0] == 1.
  2225. b = b / a[0]
  2226. a = a / a[0]
  2227. n = max(len(a), len(b))
  2228. # Pad a or b with zeros so they are the same length.
  2229. if len(a) < n:
  2230. a = np.r_[a, np.zeros(n - len(a))]
  2231. elif len(b) < n:
  2232. b = np.r_[b, np.zeros(n - len(b))]
  2233. IminusA = np.eye(n - 1) - linalg.companion(a).T
  2234. B = b[1:] - a[1:] * b[0]
  2235. # Solve zi = A*zi + B
  2236. zi = np.linalg.solve(IminusA, B)
  2237. # For future reference: we could also use the following
  2238. # explicit formulas to solve the linear system:
  2239. #
  2240. # zi = np.zeros(n - 1)
  2241. # zi[0] = B.sum() / IminusA[:,0].sum()
  2242. # asum = 1.0
  2243. # csum = 0.0
  2244. # for k in range(1,n-1):
  2245. # asum += a[k]
  2246. # csum += b[k] - a[k]*b[0]
  2247. # zi[k] = asum*zi[0] - csum
  2248. return zi
  2249. def sosfilt_zi(sos):
  2250. """
  2251. Construct initial conditions for sosfilt for step response steady-state.
  2252. Compute an initial state `zi` for the `sosfilt` function that corresponds
  2253. to the steady state of the step response.
  2254. A typical use of this function is to set the initial state so that the
  2255. output of the filter starts at the same value as the first element of
  2256. the signal to be filtered.
  2257. Parameters
  2258. ----------
  2259. sos : array_like
  2260. Array of second-order filter coefficients, must have shape
  2261. ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
  2262. specification.
  2263. Returns
  2264. -------
  2265. zi : ndarray
  2266. Initial conditions suitable for use with ``sosfilt``, shape
  2267. ``(n_sections, 2)``.
  2268. See Also
  2269. --------
  2270. sosfilt, zpk2sos
  2271. Notes
  2272. -----
  2273. .. versionadded:: 0.16.0
  2274. Examples
  2275. --------
  2276. Filter a rectangular pulse that begins at time 0, with and without
  2277. the use of the `zi` argument of `scipy.signal.sosfilt`.
  2278. >>> from scipy import signal
  2279. >>> import matplotlib.pyplot as plt
  2280. >>> sos = signal.butter(9, 0.125, output='sos')
  2281. >>> zi = signal.sosfilt_zi(sos)
  2282. >>> x = (np.arange(250) < 100).astype(int)
  2283. >>> f1 = signal.sosfilt(sos, x)
  2284. >>> f2, zo = signal.sosfilt(sos, x, zi=zi)
  2285. >>> plt.plot(x, 'k--', label='x')
  2286. >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
  2287. >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
  2288. >>> plt.legend(loc='best')
  2289. >>> plt.show()
  2290. """
  2291. sos = np.asarray(sos)
  2292. if sos.ndim != 2 or sos.shape[1] != 6:
  2293. raise ValueError('sos must be shape (n_sections, 6)')
  2294. n_sections = sos.shape[0]
  2295. zi = np.empty((n_sections, 2))
  2296. scale = 1.0
  2297. for section in range(n_sections):
  2298. b = sos[section, :3]
  2299. a = sos[section, 3:]
  2300. zi[section] = scale * lfilter_zi(b, a)
  2301. # If H(z) = B(z)/A(z) is this section's transfer function, then
  2302. # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
  2303. # state value of this section's step response.
  2304. scale *= b.sum() / a.sum()
  2305. return zi
  2306. def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
  2307. """Forward-backward IIR filter that uses Gustafsson's method.
  2308. Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
  2309. then backward, using Gustafsson's initial conditions [1]_.
  2310. Let ``y_fb`` be the result of filtering first forward and then backward,
  2311. and let ``y_bf`` be the result of filtering first backward then forward.
  2312. Gustafsson's method is to compute initial conditions for the forward
  2313. pass and the backward pass such that ``y_fb == y_bf``.
  2314. Parameters
  2315. ----------
  2316. b : scalar or 1-D ndarray
  2317. Numerator coefficients of the filter.
  2318. a : scalar or 1-D ndarray
  2319. Denominator coefficients of the filter.
  2320. x : ndarray
  2321. Data to be filtered.
  2322. axis : int, optional
  2323. Axis of `x` to be filtered. Default is -1.
  2324. irlen : int or None, optional
  2325. The length of the nonnegligible part of the impulse response.
  2326. If `irlen` is None, or if the length of the signal is less than
  2327. ``2 * irlen``, then no part of the impulse response is ignored.
  2328. Returns
  2329. -------
  2330. y : ndarray
  2331. The filtered data.
  2332. x0 : ndarray
  2333. Initial condition for the forward filter.
  2334. x1 : ndarray
  2335. Initial condition for the backward filter.
  2336. Notes
  2337. -----
  2338. Typically the return values `x0` and `x1` are not needed by the
  2339. caller. The intended use of these return values is in unit tests.
  2340. References
  2341. ----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. IEEE Transactions on Signal Processing, 44(4):988-992, 1996.
  2344. """
  2345. # In the comments, "Gustafsson's paper" and [1] refer to the
  2346. # paper referenced in the docstring.
  2347. b = np.atleast_1d(b)
  2348. a = np.atleast_1d(a)
  2349. order = max(len(b), len(a)) - 1
  2350. if order == 0:
  2351. # The filter is just scalar multiplication, with no state.
  2352. scale = (b[0] / a[0])**2
  2353. y = scale * x
  2354. return y, np.array([]), np.array([])
  2355. if axis != -1 or axis != x.ndim - 1:
  2356. # Move the axis containing the data to the end.
  2357. x = np.swapaxes(x, axis, x.ndim - 1)
  2358. # n is the number of samples in the data to be filtered.
  2359. n = x.shape[-1]
  2360. if irlen is None or n <= 2*irlen:
  2361. m = n
  2362. else:
  2363. m = irlen
  2364. # Create Obs, the observability matrix (called O in the paper).
  2365. # This matrix can be interpreted as the operator that propagates
  2366. # an arbitrary initial state to the output, assuming the input is
  2367. # zero.
  2368. # In Gustafsson's paper, the forward and backward filters are not
  2369. # necessarily the same, so he has both O_f and O_b. We use the same
  2370. # filter in both directions, so we only need O. The same comment
  2371. # applies to S below.
  2372. Obs = np.zeros((m, order))
  2373. zi = np.zeros(order)
  2374. zi[0] = 1
  2375. Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
  2376. for k in range(1, order):
  2377. Obs[k:, k] = Obs[:-k, 0]
    # Obsr is O^R (Gustafsson's notation for row-reversed O)
    Obsr = Obs[::-1]

    # Create S.  S is the matrix that applies the filter to the reversed
    # propagated initial conditions.  That is,
    #     out = S.dot(zi)
    # is the same as
    #     tmp, _ = lfilter(b, a, zeros(), zi=zi)  # Propagate ICs.
    #     out = lfilter(b, a, tmp[::-1])          # Reverse and filter.

    # Equations (5) & (6) of [1]
    S = lfilter(b, a, Obs[::-1], axis=0)

    # Sr is S^R (row-reversed S)
    Sr = S[::-1]

    # M is [(S^R - O), (O^R - S)]
    if m == n:
        M = np.hstack((Sr - Obs, Obsr - S))
    else:
        # Matrix described in section IV of [1].
        M = np.zeros((2*m, 2*order))
        M[:m, :order] = Sr - Obs
        M[m:, order:] = Obsr - S

    # Naive forward-backward and backward-forward filters.
    # These have large transients because the filters use zero initial
    # conditions.
    y_f = lfilter(b, a, x)
    y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]

    y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
    y_bf = lfilter(b, a, y_b)

    delta_y_bf_fb = y_bf - y_fb
    if m == n:
        delta = delta_y_bf_fb
    else:
        start_m = delta_y_bf_fb[..., :m]
        end_m = delta_y_bf_fb[..., -m:]
        delta = np.concatenate((start_m, end_m), axis=-1)

    # ic_opt holds the "optimal" initial conditions.
    # The following code computes the result shown in the formula
    # of the paper between equations (6) and (7).
    if delta.ndim == 1:
        ic_opt = linalg.lstsq(M, delta)[0]
    else:
        # Reshape delta so it can be used as an array of multiple
        # right-hand-sides in linalg.lstsq.
        delta2d = delta.reshape(-1, delta.shape[-1]).T
        ic_opt0 = linalg.lstsq(M, delta2d)[0].T
        ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))

    # Now compute the filtered signal using equation (7) of [1].
    # First, form [S^R, O^R] and call it W.
    if m == n:
        W = np.hstack((Sr, Obsr))
    else:
        W = np.zeros((2*m, 2*order))
        W[:m, :order] = Sr
        W[m:, order:] = Obsr

    # Equation (7) of [1] says
    #     Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
    # `wic` is (almost) the product on the right.
    # W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
    # so we can't use W.dot(ic_opt).  Instead, we dot ic_opt with W.T,
    # so wic has shape (..., m).
    wic = ic_opt.dot(W.T)

    # `wic` is "almost" the product of W and the optimal ICs in equation
    # (7)--if we're using a truncated impulse response (m < n), `wic`
    # contains only the adjustments required for the ends of the signal.
    # Here we form y_opt, taking this into account if necessary.
    y_opt = y_fb
    if m == n:
        y_opt += wic
    else:
        y_opt[..., :m] += wic[..., :m]
        y_opt[..., -m:] += wic[..., -m:]

    x0 = ic_opt[..., :order]
    x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
        # Restore the data axis to its original position.
        x0 = np.swapaxes(x0, axis, x.ndim - 1)
        x1 = np.swapaxes(x1, axis, x.ndim - 1)
        y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)

    return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
             irlen=None):
    """
    Apply a digital filter forward and backward to a signal.

    This function applies a linear digital filter twice, once forward and
    once backwards.  The combined filter has zero phase and a filter order
    twice that of the original.

    The function provides options for handling the edges of the signal.

    Parameters
    ----------
    b : (N,) array_like
        The numerator coefficient vector of the filter.
    a : (N,) array_like
        The denominator coefficient vector of the filter.  If ``a[0]``
        is not 1, then both `a` and `b` are normalized by ``a[0]``.
    x : array_like
        The array of data to be filtered.
    axis : int, optional
        The axis of `x` to which the filter is applied.
        Default is -1.
    padtype : str or None, optional
        Must be 'odd', 'even', 'constant', or None.  This determines the
        type of extension to use for the padded signal to which the filter
        is applied.  If `padtype` is None, no padding is used.  The default
        is 'odd'.
    padlen : int or None, optional
        The number of elements by which to extend `x` at both ends of
        `axis` before applying the filter.  This value must be less than
        ``x.shape[axis] - 1``.  ``padlen=0`` implies no padding.
        The default value is ``3 * max(len(a), len(b))``.
    method : str, optional
        Determines the method for handling the edges of the signal, either
        "pad" or "gust".  When `method` is "pad", the signal is padded; the
        type of padding is determined by `padtype` and `padlen`, and `irlen`
        is ignored.  When `method` is "gust", Gustafsson's method is used,
        and `padtype` and `padlen` are ignored.
    irlen : int or None, optional
        When `method` is "gust", `irlen` specifies the length of the
        impulse response of the filter.  If `irlen` is None, no part
        of the impulse response is ignored.  For a long signal, specifying
        `irlen` can significantly improve the performance of the filter.

    Returns
    -------
    y : ndarray
        The filtered output with the same shape as `x`.

    See Also
    --------
    sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt

    Notes
    -----
    When `method` is "pad", the function pads the data along the given axis
    in one of three ways: odd, even or constant.  The odd and even extensions
    have the corresponding symmetry about the end point of the data.  The
    constant extension extends the data with the values at the end points.  On
    both the forward and backward passes, the initial condition of the
    filter is found by using `lfilter_zi` and scaling it by the end point of
    the extended data.

    When `method` is "gust", Gustafsson's method [1]_ is used.  Initial
    conditions are chosen for the forward and backward passes so that the
    forward-backward filter gives the same result as the backward-forward
    filter.

    The option to use Gustafsson's method was added in scipy version 0.16.0.

    References
    ----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
           filtering", IEEE Transactions on Signal Processing, Vol. 44, No. 4,
           pp. 988-992, 1996.

    Examples
    --------
    The examples will use several functions from `scipy.signal`.

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    First we create a one second signal that is the sum of two pure sine
    waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.

    >>> t = np.linspace(0, 1.0, 2001)
    >>> xlow = np.sin(2 * np.pi * 5 * t)
    >>> xhigh = np.sin(2 * np.pi * 250 * t)
    >>> x = xlow + xhigh

    Now create a lowpass Butterworth filter with a cutoff of 0.125 times
    the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
    The result should be approximately ``xlow``, with no phase shift.

    >>> b, a = signal.butter(8, 0.125)
    >>> y = signal.filtfilt(b, a, x, padlen=150)
    >>> np.abs(y - xlow).max()
    9.1086182074789912e-06

    We get a fairly clean result for this artificial example because
    the odd extension is exact, and with the moderately long padding,
    the filter's transients have dissipated by the time the actual data
    is reached.  In general, transient effects at the edges are
    unavoidable.

    The following example demonstrates the option ``method="gust"``.

    First, create a filter.

    >>> b, a = signal.ellip(4, 0.01, 120, 0.125)  # Filter to be applied.
    >>> np.random.seed(123456)

    `sig` is a random input signal to be filtered.

    >>> n = 60
    >>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()

    Apply `filtfilt` to `sig`, once using the Gustafsson method, and
    once using padding, and plot the results for comparison.

    >>> fgust = signal.filtfilt(b, a, sig, method="gust")
    >>> fpad = signal.filtfilt(b, a, sig, padlen=50)
    >>> plt.plot(sig, 'k-', label='input')
    >>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
    >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
    >>> plt.legend(loc='best')
    >>> plt.show()

    The `irlen` argument can be used to improve the performance
    of Gustafsson's method.

    Estimate the impulse response length of the filter.

    >>> z, p, k = signal.tf2zpk(b, a)
    >>> eps = 1e-9
    >>> r = np.max(np.abs(p))
    >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
    >>> approx_impulse_len
    137

    Apply the filter to a longer signal, with and without the `irlen`
    argument.  The difference between `y1` and `y2` is small.  For long
    signals, using `irlen` gives a significant performance improvement.

    >>> x = np.random.randn(5000)
    >>> y1 = signal.filtfilt(b, a, x, method='gust')
    >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
    >>> print(np.max(np.abs(y1 - y2)))
    1.80056858312e-10
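    As a rough additional check (an illustrative sketch, not part of the
    original documentation): Gustafsson's method chooses the initial
    conditions so that forward-then-backward filtering matches
    backward-then-forward filtering, so running `filtfilt` on the reversed
    signal and reversing the result is expected to agree closely with `y1`.

    >>> y3 = signal.filtfilt(b, a, x[::-1], method='gust')[::-1]
    >>> max_diff = np.max(np.abs(y1 - y3))  # expected to be very small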

    """
    b = np.atleast_1d(b)
    a = np.atleast_1d(a)
    x = np.asarray(x)

    if method not in ["pad", "gust"]:
        raise ValueError("method must be 'pad' or 'gust'.")

    if method == "gust":
        y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
        return y

    # method == "pad"
    edge, ext = _validate_pad(padtype, padlen, x, axis,
                              ntaps=max(len(a), len(b)))

    # Get the steady state of the filter's step response.
    zi = lfilter_zi(b, a)

    # Reshape zi and create x0 so that zi*x0 broadcasts
    # to the correct value for the 'zi' keyword argument
    # to lfilter.
    zi_shape = [1] * x.ndim
    zi_shape[axis] = zi.size
    zi = np.reshape(zi, zi_shape)
    x0 = axis_slice(ext, stop=1, axis=axis)

    # Forward filter.
    (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)

    # Backward filter.
    # Create y0 so zi*y0 broadcasts appropriately.
    y0 = axis_slice(y, start=-1, axis=axis)
    (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)

    # Reverse y.
    y = axis_reverse(y, axis=axis)

    if edge > 0:
        # Slice the actual signal from the extended signal.
        y = axis_slice(y, start=edge, stop=-edge, axis=axis)

    return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
    """Helper to validate padding for filtfilt."""
    if padtype not in ['even', 'odd', 'constant', None]:
        raise ValueError(("Unknown value '%s' given to padtype.  padtype "
                          "must be 'even', 'odd', 'constant', or None.") %
                         padtype)

    if padtype is None:
        padlen = 0

    if padlen is None:
        # Original padding; preserved for backwards compatibility.
        edge = ntaps * 3
    else:
        edge = padlen

    # x's 'axis' dimension must be bigger than edge.
    if x.shape[axis] <= edge:
        raise ValueError("The length of the input vector x must be greater "
                         "than padlen, which is %d." % edge)

    if padtype is not None and edge > 0:
        # Make an extension of length `edge` at each
        # end of the input array.
        if padtype == 'even':
            ext = even_ext(x, edge, axis=axis)
        elif padtype == 'odd':
            ext = odd_ext(x, edge, axis=axis)
        else:
            ext = const_ext(x, edge, axis=axis)
    else:
        ext = x
    return edge, ext
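# Illustrative note (a sketch, not part of the original source): with the
# default ``padlen`` (``edge = 3 * ntaps``), an order-8 filter has
# ``ntaps = 9`` coefficients, so ``edge == 27`` and the extension returned
# by ``_validate_pad`` is longer than ``x`` by ``2 * edge`` samples, e.g.
#
#     b, a = butter(8, 0.125)                                 # 9 taps each
#     edge, ext = _validate_pad('odd', None, x, -1, ntaps=9)  # edge == 27
#     # ext.shape[-1] == x.shape[-1] + 2 * edge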
def sosfilt(sos, x, axis=-1, zi=None):
    """
    Filter data along one dimension using cascaded second-order sections.

    Filter a data sequence, `x`, using a digital IIR filter defined by
    `sos`.  This is implemented by performing `lfilter` for each
    second-order section.  See `lfilter` for details.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``.  Each row corresponds to a second-order
        section, with the first three columns providing the numerator
        coefficients and the last three providing the denominator
        coefficients.
    x : array_like
        An N-dimensional input array.
    axis : int, optional
        The axis of the input data array along which to apply the
        linear filter.  The filter is applied to each subarray along
        this axis.  Default is -1.
    zi : array_like, optional
        Initial conditions for the cascaded filter delays.  It is an
        (at least 2-D) array of shape ``(n_sections, ..., 2, ...)``, where
        ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
        replaced by 2.  If `zi` is None or is not given then initial rest
        (i.e. all zeros) is assumed.
        Note that these initial conditions are *not* the same as the initial
        conditions given by `lfiltic` or `lfilter_zi`.

    Returns
    -------
    y : ndarray
        The output of the digital filter.
    zf : ndarray, optional
        If `zi` is None, this is not returned, otherwise, `zf` holds the
        final filter delay values.

    See Also
    --------
    zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz

    Notes
    -----
    The filter function is implemented as a series of second-order filters
    with direct-form II transposed structure.  It is designed to minimize
    numerical precision errors for high-order filters.

    .. versionadded:: 0.16.0

    Examples
    --------
    Plot a 13th-order filter's impulse response using both `lfilter` and
    `sosfilt`, showing the instability that results from trying to do a
    13th-order filter in a single stage (the numerical error pushes some poles
    outside of the unit circle):

    >>> import matplotlib.pyplot as plt
    >>> from scipy import signal
    >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
    >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
    >>> x = signal.unit_impulse(700)
    >>> y_tf = signal.lfilter(b, a, x)
    >>> y_sos = signal.sosfilt(sos, x)
    >>> plt.plot(y_tf, 'r', label='TF')
    >>> plt.plot(y_sos, 'k', label='SOS')
    >>> plt.legend(loc='best')
    >>> plt.show()
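    As an additional hedged sketch (not part of the original example), `zi`
    can be used to process a long signal in blocks: carrying the returned
    final state into the next call gives the same result as filtering the
    whole signal at once.

    >>> long_x = np.random.randn(1000)
    >>> zi = np.zeros((sos.shape[0], 2))          # initial rest
    >>> y_a, zo = signal.sosfilt(sos, long_x[:500], zi=zi)
    >>> y_b, zo = signal.sosfilt(sos, long_x[500:], zi=zo)
    >>> np.allclose(np.concatenate([y_a, y_b]), signal.sosfilt(sos, long_x))
    True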

    """
    x = np.asarray(x)
    sos, n_sections = _validate_sos(sos)
    use_zi = zi is not None
    if use_zi:
        zi = np.asarray(zi)
        x_zi_shape = list(x.shape)
        x_zi_shape[axis] = 2
        x_zi_shape = tuple([n_sections] + x_zi_shape)
        if zi.shape != x_zi_shape:
            raise ValueError('Invalid zi shape.  With axis=%r, an input with '
                             'shape %r, and an sos array with %d sections, zi '
                             'must have shape %r, got %r.' %
                             (axis, x.shape, n_sections, x_zi_shape, zi.shape))
        zf = zeros_like(zi)

    for section in range(n_sections):
        if use_zi:
            x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
                                     x, axis, zi=zi[section])
        else:
            x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
    out = (x, zf) if use_zi else x
    return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
    """
    A forward-backward digital filter using cascaded second-order sections.

    See `filtfilt` for more complete information about this method.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``.  Each row corresponds to a second-order
        section, with the first three columns providing the numerator
        coefficients and the last three providing the denominator
        coefficients.
    x : array_like
        The array of data to be filtered.
    axis : int, optional
        The axis of `x` to which the filter is applied.
        Default is -1.
    padtype : str or None, optional
        Must be 'odd', 'even', 'constant', or None.  This determines the
        type of extension to use for the padded signal to which the filter
        is applied.  If `padtype` is None, no padding is used.  The default
        is 'odd'.
    padlen : int or None, optional
        The number of elements by which to extend `x` at both ends of
        `axis` before applying the filter.  This value must be less than
        ``x.shape[axis] - 1``.  ``padlen=0`` implies no padding.
        The default value is::

            3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
                                        (sos[:, 5] == 0).sum()))

        The extra subtraction at the end attempts to compensate for poles
        and zeros at the origin (e.g. for odd-order filters) to yield
        equivalent estimates of `padlen` to those of `filtfilt` for
        second-order section filters built with `scipy.signal` functions.

    Returns
    -------
    y : ndarray
        The filtered output with the same shape as `x`.

    See Also
    --------
    filtfilt, sosfilt, sosfilt_zi, sosfreqz

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy.signal import sosfiltfilt, butter
    >>> import matplotlib.pyplot as plt

    Create an interesting signal to filter.

    >>> n = 201
    >>> t = np.linspace(0, 1, n)
    >>> np.random.seed(123)
    >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)

    Create a lowpass Butterworth filter, and use it to filter `x`.

    >>> sos = butter(4, 0.125, output='sos')
    >>> y = sosfiltfilt(sos, x)

    For comparison, apply an 8th order filter using `sosfilt`.  The filter
    is initialized using the mean of the first four values of `x`.

    >>> from scipy.signal import sosfilt, sosfilt_zi
    >>> sos8 = butter(8, 0.125, output='sos')
    >>> zi = x[:4].mean() * sosfilt_zi(sos8)
    >>> y2, zo = sosfilt(sos8, x, zi=zi)

    Plot the results.  Note that the phase of `y` matches the input, while
    `y2` has a significant phase delay.

    >>> plt.plot(t, x, alpha=0.5, label='x(t)')
    >>> plt.plot(t, y, label='y(t)')
    >>> plt.plot(t, y2, label='y2(t)')
    >>> plt.legend(framealpha=1, shadow=True)
    >>> plt.grid(alpha=0.25)
    >>> plt.xlabel('t')
    >>> plt.show()
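    As a hedged extra sketch (not part of the original example), the filter is
    applied independently along `axis`, so filtering a stacked 2-D array row
    by row matches the 1-D result above.

    >>> X = np.vstack([x, x])
    >>> Y = sosfiltfilt(sos, X, axis=1)
    >>> np.allclose(Y[0], y)
    True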

    """
    sos, n_sections = _validate_sos(sos)
    # Ensure x is an ndarray so that its shape and ndim are available below.
    x = np.asarray(x)

    # `method` is "pad"...
    ntaps = 2 * n_sections + 1
    ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
    edge, ext = _validate_pad(padtype, padlen, x, axis,
                              ntaps=ntaps)

    # These steps follow the same form as filtfilt with modifications
    zi = sosfilt_zi(sos)  # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
    zi_shape = [1] * x.ndim
    zi_shape[axis] = 2
    zi.shape = [n_sections] + zi_shape
    x_0 = axis_slice(ext, stop=1, axis=axis)
    (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
    y_0 = axis_slice(y, start=-1, axis=axis)
    (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
    y = axis_reverse(y, axis=axis)
    if edge > 0:
        y = axis_slice(y, start=edge, stop=-edge, axis=axis)
    return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
    """
    Downsample the signal after applying an anti-aliasing filter.

    By default, an order 8 Chebyshev type I filter is used.  A FIR filter
    with a Hamming window (of order ``20 * q``) is used if `ftype` is 'fir'.

    Parameters
    ----------
    x : array_like
        The signal to be downsampled, as an N-dimensional array.
    q : int
        The downsampling factor.  When using IIR downsampling, it is
        recommended to call `decimate` multiple times for downsampling
        factors higher than 13.
    n : int, optional
        The order of the filter (1 less than the length for 'fir').  Defaults
        to 8 for 'iir' and 20 times the downsampling factor for 'fir'.
    ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
        If 'iir' or 'fir', specifies the type of lowpass filter.  If an
        instance of a `dlti` object, uses that object to filter before
        downsampling.
    axis : int, optional
        The axis along which to decimate.
    zero_phase : bool, optional
        Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
        when using an IIR filter, and shifting the outputs back by the
        filter's group delay when using an FIR filter.  The default value of
        ``True`` is recommended, since a phase shift is generally not desired.

        .. versionadded:: 0.18.0

    Returns
    -------
    y : ndarray
        The down-sampled signal.

    See Also
    --------
    resample : Resample up or down using the FFT method.
    resample_poly : Resample using polyphase filtering and an FIR filter.

    Notes
    -----
    The ``zero_phase`` keyword was added in 0.18.0.
    The possibility to use instances of ``dlti`` as ``ftype`` was added in
    0.18.0.
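
    Examples
    --------
    A brief illustrative sketch (not part of the original docstring):
    downsampling a 1000-sample signal by a factor of 4 with the default IIR
    filter keeps every fourth filtered sample.

    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 1000, endpoint=False)
    >>> sig = np.sin(2 * np.pi * 5 * t)
    >>> signal.decimate(sig, 4).shape
    (250,)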

    """
    x = asarray(x)
    q = operator.index(q)

    if n is not None:
        n = operator.index(n)

    if ftype == 'fir':
        if n is None:
            half_len = 10 * q  # reasonable cutoff for our sinc-like function
            n = 2 * half_len
        b, a = firwin(n+1, 1. / q, window='hamming'), 1.
    elif ftype == 'iir':
        if n is None:
            n = 8
        system = dlti(*cheby1(n, 0.05, 0.8 / q))
        b, a = system.num, system.den
    elif isinstance(ftype, dlti):
        system = ftype._as_tf()  # Avoids copying if already in TF form
        b, a = system.num, system.den
    else:
        raise ValueError('invalid ftype')

    sl = [slice(None)] * x.ndim
    a = np.asarray(a)

    if a.size == 1:  # FIR case
        b = b / a
        if zero_phase:
            y = resample_poly(x, 1, q, axis=axis, window=b)
        else:
            # upfirdn is generally faster than lfilter by a factor equal to the
            # downsampling factor, since it only calculates the needed outputs
            n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
            y = upfirdn(b, x, up=1, down=q, axis=axis)
            sl[axis] = slice(None, n_out, None)
    else:  # IIR case
        if zero_phase:
            y = filtfilt(b, a, x, axis=axis)
        else:
            y = lfilter(b, a, x, axis=axis)
        sl[axis] = slice(None, None, q)

    return y[tuple(sl)]