- #
- # Author: Travis Oliphant 2002-2011 with contributions from
- # SciPy Developers 2004-2011
- #
- from __future__ import division, print_function, absolute_import
- from scipy._lib.six import string_types, exec_, PY3
- from scipy._lib._util import getargspec_no_self as _getargspec
- import sys
- import keyword
- import re
- import types
- import warnings
- from scipy.misc import doccer
- from ._distr_params import distcont, distdiscrete
- from scipy._lib._util import check_random_state
- from scipy._lib._util import _valarray as valarray
- from scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive)
- # for root finding for discrete distribution ppf, and max likelihood estimation
- from scipy import optimize
- # for functions of continuous distributions (e.g. moments, entropy, cdf)
- from scipy import integrate
- # to approximate the pdf of a continuous distribution given its cdf
- from scipy.misc import derivative
- from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
- logical_and, log, sqrt, place, argmax, vectorize, asarray,
- nan, inf, isinf, NINF, empty)
- import numpy as np
- from ._constants import _XMAX
- if PY3:
- def instancemethod(func, obj, cls):
- return types.MethodType(func, obj)
- else:
- instancemethod = types.MethodType
- # These are the docstring parts used for substitution in specific
- # distribution docstrings
- docheaders = {'methods': """\nMethods\n-------\n""",
- 'notes': """\nNotes\n-----\n""",
- 'examples': """\nExamples\n--------\n"""}
- _doc_rvs = """\
- rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
- Random variates.
- """
- _doc_pdf = """\
- pdf(x, %(shapes)s, loc=0, scale=1)
- Probability density function.
- """
- _doc_logpdf = """\
- logpdf(x, %(shapes)s, loc=0, scale=1)
- Log of the probability density function.
- """
- _doc_pmf = """\
- pmf(k, %(shapes)s, loc=0, scale=1)
- Probability mass function.
- """
- _doc_logpmf = """\
- logpmf(k, %(shapes)s, loc=0, scale=1)
- Log of the probability mass function.
- """
- _doc_cdf = """\
- cdf(x, %(shapes)s, loc=0, scale=1)
- Cumulative distribution function.
- """
- _doc_logcdf = """\
- logcdf(x, %(shapes)s, loc=0, scale=1)
- Log of the cumulative distribution function.
- """
- _doc_sf = """\
- sf(x, %(shapes)s, loc=0, scale=1)
- Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
- """
- _doc_logsf = """\
- logsf(x, %(shapes)s, loc=0, scale=1)
- Log of the survival function.
- """
- _doc_ppf = """\
- ppf(q, %(shapes)s, loc=0, scale=1)
- Percent point function (inverse of ``cdf`` --- percentiles).
- """
- _doc_isf = """\
- isf(q, %(shapes)s, loc=0, scale=1)
- Inverse survival function (inverse of ``sf``).
- """
- _doc_moment = """\
- moment(n, %(shapes)s, loc=0, scale=1)
- Non-central moment of order n.
- """
- _doc_stats = """\
- stats(%(shapes)s, loc=0, scale=1, moments='mv')
- Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
- """
- _doc_entropy = """\
- entropy(%(shapes)s, loc=0, scale=1)
- (Differential) entropy of the RV.
- """
- _doc_fit = """\
- fit(data, %(shapes)s, loc=0, scale=1)
- Parameter estimates for generic data.
- """
- _doc_expect = """\
- expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
- Expected value of a function (of one argument) with respect to the distribution.
- """
- _doc_expect_discrete = """\
- expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
- Expected value of a function (of one argument) with respect to the distribution.
- """
- _doc_median = """\
- median(%(shapes)s, loc=0, scale=1)
- Median of the distribution.
- """
- _doc_mean = """\
- mean(%(shapes)s, loc=0, scale=1)
- Mean of the distribution.
- """
- _doc_var = """\
- var(%(shapes)s, loc=0, scale=1)
- Variance of the distribution.
- """
- _doc_std = """\
- std(%(shapes)s, loc=0, scale=1)
- Standard deviation of the distribution.
- """
- _doc_interval = """\
- interval(alpha, %(shapes)s, loc=0, scale=1)
- Endpoints of the range that contains ``100 * alpha %`` of the distribution.
- """
- _doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
- _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
- _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
- _doc_stats, _doc_entropy, _doc_fit,
- _doc_expect, _doc_median,
- _doc_mean, _doc_var, _doc_std, _doc_interval])
- _doc_default_longsummary = """\
- As an instance of the `rv_continuous` class, a `%(name)s` object inherits from it
- a collection of generic methods (see below for the full list),
- and completes them with details specific to this particular distribution.
- """
- _doc_default_frozen_note = """
- Alternatively, the object may be called (as a function) to fix the shape,
- location, and scale parameters returning a "frozen" continuous RV object:
- rv = %(name)s(%(shapes)s, loc=0, scale=1)
- - Frozen RV object with the same methods but holding the given shape,
- location, and scale fixed.
- """
- _doc_default_example = """\
- Examples
- --------
- >>> from scipy.stats import %(name)s
- >>> import matplotlib.pyplot as plt
- >>> fig, ax = plt.subplots(1, 1)
- Calculate the first few moments:
- %(set_vals_stmt)s
- >>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
- Display the probability density function (``pdf``):
- >>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
- ... %(name)s.ppf(0.99, %(shapes)s), 100)
- >>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
- ... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
- Alternatively, the distribution object can be called (as a function)
- to fix the shape, location and scale parameters. This returns a "frozen"
- RV object holding the given parameters fixed.
- Freeze the distribution and display the frozen ``pdf``:
- >>> rv = %(name)s(%(shapes)s)
- >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
- Check accuracy of ``cdf`` and ``ppf``:
- >>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
- >>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
- True
- Generate random numbers:
- >>> r = %(name)s.rvs(%(shapes)s, size=1000)
- And compare the histogram:
- >>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
- >>> ax.legend(loc='best', frameon=False)
- >>> plt.show()
- """
- _doc_default_locscale = """\
- The probability density above is defined in the "standardized" form. To shift
- and/or scale the distribution use the ``loc`` and ``scale`` parameters.
- Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
- equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
- ``y = (x - loc) / scale``.
- """
- _doc_default = ''.join([_doc_default_longsummary,
- _doc_allmethods,
- '\n',
- _doc_default_example])
- _doc_default_before_notes = ''.join([_doc_default_longsummary,
- _doc_allmethods])
- docdict = {
- 'rvs': _doc_rvs,
- 'pdf': _doc_pdf,
- 'logpdf': _doc_logpdf,
- 'cdf': _doc_cdf,
- 'logcdf': _doc_logcdf,
- 'sf': _doc_sf,
- 'logsf': _doc_logsf,
- 'ppf': _doc_ppf,
- 'isf': _doc_isf,
- 'stats': _doc_stats,
- 'entropy': _doc_entropy,
- 'fit': _doc_fit,
- 'moment': _doc_moment,
- 'expect': _doc_expect,
- 'interval': _doc_interval,
- 'mean': _doc_mean,
- 'std': _doc_std,
- 'var': _doc_var,
- 'median': _doc_median,
- 'allmethods': _doc_allmethods,
- 'longsummary': _doc_default_longsummary,
- 'frozennote': _doc_default_frozen_note,
- 'example': _doc_default_example,
- 'default': _doc_default,
- 'before_notes': _doc_default_before_notes,
- 'after_notes': _doc_default_locscale
- }
- # Reuse common content between continuous and discrete docs, change some
- # minor bits.
- docdict_discrete = docdict.copy()
- docdict_discrete['pmf'] = _doc_pmf
- docdict_discrete['logpmf'] = _doc_logpmf
- docdict_discrete['expect'] = _doc_expect_discrete
- _doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
- 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
- 'mean', 'var', 'std', 'interval']
- for obj in _doc_disc_methods:
- docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
- _doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
- for obj in _doc_disc_methods_err_varname:
- docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
- docdict_discrete.pop('pdf')
- docdict_discrete.pop('logpdf')
- _doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
- docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
- docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
- 'rv_continuous', 'rv_discrete')
- _doc_default_frozen_note = """
- Alternatively, the object may be called (as a function) to fix the shape and
- location parameters returning a "frozen" discrete RV object:
- rv = %(name)s(%(shapes)s, loc=0)
- - Frozen RV object with the same methods but holding the given shape and
- location fixed.
- """
- docdict_discrete['frozennote'] = _doc_default_frozen_note
- _doc_default_discrete_example = """\
- Examples
- --------
- >>> from scipy.stats import %(name)s
- >>> import matplotlib.pyplot as plt
- >>> fig, ax = plt.subplots(1, 1)
- Calculate the first few moments:
- %(set_vals_stmt)s
- >>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
- Display the probability mass function (``pmf``):
- >>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
- ... %(name)s.ppf(0.99, %(shapes)s))
- >>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
- >>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
- Alternatively, the distribution object can be called (as a function)
- to fix the shape and location. This returns a "frozen" RV object holding
- the given parameters fixed.
- Freeze the distribution and display the frozen ``pmf``:
- >>> rv = %(name)s(%(shapes)s)
- >>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
- ... label='frozen pmf')
- >>> ax.legend(loc='best', frameon=False)
- >>> plt.show()
- Check accuracy of ``cdf`` and ``ppf``:
- >>> prob = %(name)s.cdf(x, %(shapes)s)
- >>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
- True
- Generate random numbers:
- >>> r = %(name)s.rvs(%(shapes)s, size=1000)
- """
- _doc_default_discrete_locscale = """\
- The probability mass function above is defined in the "standardized" form.
- To shift the distribution use the ``loc`` parameter.
- Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
- equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
- """
- docdict_discrete['example'] = _doc_default_discrete_example
- docdict_discrete['after_notes'] = _doc_default_discrete_locscale
- _doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
- docdict_discrete['allmethods']])
- docdict_discrete['before_notes'] = _doc_default_before_notes
- _doc_default_disc = ''.join([docdict_discrete['longsummary'],
- docdict_discrete['allmethods'],
- docdict_discrete['frozennote'],
- docdict_discrete['example']])
- docdict_discrete['default'] = _doc_default_disc
- # clean up all the separate docstring elements, we do not need them anymore
- for obj in [s for s in dir() if s.startswith('_doc_')]:
- exec('del ' + obj)
- del obj
- try:
- del s
- except NameError:
- # in Python 3, the list comprehension variable `s` does not leak into this scope
- pass
- def _moment(data, n, mu=None):
- if mu is None:
- mu = data.mean()
- return ((data - mu)**n).mean()
- def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
- if (n == 0):
- return 1.0
- elif (n == 1):
- if mu is None:
- val = moment_func(1, *args)
- else:
- val = mu
- elif (n == 2):
- if mu2 is None or mu is None:
- val = moment_func(2, *args)
- else:
- val = mu2 + mu*mu
- elif (n == 3):
- if g1 is None or mu2 is None or mu is None:
- val = moment_func(3, *args)
- else:
- mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
- val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
- elif (n == 4):
- if g1 is None or g2 is None or mu2 is None or mu is None:
- val = moment_func(4, *args)
- else:
- mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
- mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
- val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
- else:
- val = moment_func(n, *args)
- return val
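- # Quick sanity check of the conversions above (a sketch, not library code):
- # for a standard normal, mu=0, mu2=1, g1=0, g2=0, so the formulas give
- # E[Y**3] = 0 and E[Y**4] = (g2 + 3)*mu2**2 = 3, matching the known raw
- # moments of N(0, 1).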
- def _skew(data):
- """
- skew is third central moment / variance**(1.5)
- """
- data = np.ravel(data)
- mu = data.mean()
- m2 = ((data - mu)**2).mean()
- m3 = ((data - mu)**3).mean()
- return m3 / np.power(m2, 1.5)
- def _kurtosis(data):
- """
- kurtosis is the fourth central moment / variance**2 - 3 (Fisher / excess kurtosis)
- """
- data = np.ravel(data)
- mu = data.mean()
- m2 = ((data - mu)**2).mean()
- m4 = ((data - mu)**4).mean()
- return m4 / m2**2 - 3
- # Frozen RV class
- class rv_frozen(object):
- def __init__(self, dist, *args, **kwds):
- self.args = args
- self.kwds = kwds
- # create a new instance
- self.dist = dist.__class__(**dist._updated_ctor_param())
- # a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
- shapes, _, _ = self.dist._parse_args(*args, **kwds)
- self.dist._argcheck(*shapes)
- self.a, self.b = self.dist.a, self.dist.b
- @property
- def random_state(self):
- return self.dist._random_state
- @random_state.setter
- def random_state(self, seed):
- self.dist._random_state = check_random_state(seed)
- def pdf(self, x): # raises AttributeError in frozen discrete distribution
- return self.dist.pdf(x, *self.args, **self.kwds)
- def logpdf(self, x):
- return self.dist.logpdf(x, *self.args, **self.kwds)
- def cdf(self, x):
- return self.dist.cdf(x, *self.args, **self.kwds)
- def logcdf(self, x):
- return self.dist.logcdf(x, *self.args, **self.kwds)
- def ppf(self, q):
- return self.dist.ppf(q, *self.args, **self.kwds)
- def isf(self, q):
- return self.dist.isf(q, *self.args, **self.kwds)
- def rvs(self, size=None, random_state=None):
- kwds = self.kwds.copy()
- kwds.update({'size': size, 'random_state': random_state})
- return self.dist.rvs(*self.args, **kwds)
- def sf(self, x):
- return self.dist.sf(x, *self.args, **self.kwds)
- def logsf(self, x):
- return self.dist.logsf(x, *self.args, **self.kwds)
- def stats(self, moments='mv'):
- kwds = self.kwds.copy()
- kwds.update({'moments': moments})
- return self.dist.stats(*self.args, **kwds)
- def median(self):
- return self.dist.median(*self.args, **self.kwds)
- def mean(self):
- return self.dist.mean(*self.args, **self.kwds)
- def var(self):
- return self.dist.var(*self.args, **self.kwds)
- def std(self):
- return self.dist.std(*self.args, **self.kwds)
- def moment(self, n):
- return self.dist.moment(n, *self.args, **self.kwds)
- def entropy(self):
- return self.dist.entropy(*self.args, **self.kwds)
- def pmf(self, k):
- return self.dist.pmf(k, *self.args, **self.kwds)
- def logpmf(self, k):
- return self.dist.logpmf(k, *self.args, **self.kwds)
- def interval(self, alpha):
- return self.dist.interval(alpha, *self.args, **self.kwds)
- def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
- # The expect method only accepts shape parameters as positional args;
- # hence convert self.args and self.kwds (and loc/scale) into that form.
- # See the .expect method docstrings for the meaning of
- # the other parameters.
- a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
- if isinstance(self.dist, rv_discrete):
- return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
- else:
- return self.dist.expect(func, a, loc, scale, lb, ub,
- conditional, **kwds)
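- # Usage sketch (assumes scipy.stats.gamma, which is not defined in this
- # file): freezing avoids re-passing the parameters on every call, e.g.
- # >>> from scipy.stats import gamma
- # >>> rv = gamma(2.0, loc=0, scale=1)   # rv is an rv_frozen instance
- # >>> rv.mean(), rv.pdf(1.0)            # same as gamma.mean(2.0), gamma.pdf(1.0, 2.0)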
- # This should be rewritten
- def argsreduce(cond, *args):
- """Return the sequence of ravel(args[i]) where ravel(condition) is
- True in 1D.
- Examples
- --------
- >>> import numpy as np
- >>> rand = np.random.random_sample
- >>> A = rand((4, 5))
- >>> B = 2
- >>> C = rand((1, 5))
- >>> cond = np.ones(A.shape)
- >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
- >>> B1.shape
- (20,)
- >>> cond[2,:] = 0
- >>> [A2, B2, C2] = argsreduce(cond, A, B, C)
- >>> B2.shape
- (15,)
- """
- newargs = np.atleast_1d(*args)
- if not isinstance(newargs, list):
- newargs = [newargs, ]
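- # `cond == cond` is an all-True boolean array with cond's shape (assuming
- # cond contains no NaNs); multiplying by it broadcasts scalar or smaller
- # args up to cond's shape so that np.extract below returns one value per
- # True entry of cond for every argument.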
- expand_arr = (cond == cond)
- return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
- parse_arg_template = """
- def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
- return (%(shape_arg_str)s), %(locscale_out)s
- def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
- return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
- def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
- return (%(shape_arg_str)s), %(locscale_out)s, moments
- """
- # Both the continuous and discrete distributions depend on ncx2.
- # The function name ncx2 is an abbreviation for noncentral chi squared.
- def _ncx2_log_pdf(x, df, nc):
- # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
- # factor of exp(-xs*ns) into the ive function to improve numerical
- # stability at large values of xs. See also `rice.pdf`.
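- # For reference, the log-pdf being computed is
- #   -log(2) - (x + nc)/2 + (df/4 - 1/2)*log(x/nc) + log(I[df/2-1](sqrt(x*nc)))
- # and since ive(v, z) = iv(v, z)*exp(-abs(z)), the exponential terms combine
- # into the -(xs - ns)**2/2 term below.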
- df2 = df/2.0 - 1.0
- xs, ns = np.sqrt(x), np.sqrt(nc)
- res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
- res += np.log(ive(df2, xs*ns) / 2.0)
- return res
- def _ncx2_pdf(x, df, nc):
- return np.exp(_ncx2_log_pdf(x, df, nc))
- def _ncx2_cdf(x, df, nc):
- return chndtr(x, df, nc)
- class rv_generic(object):
- """Class which encapsulates common functionality between rv_discrete
- and rv_continuous.
- """
- def __init__(self, seed=None):
- super(rv_generic, self).__init__()
- # figure out if _stats signature has 'moments' keyword
- sign = _getargspec(self._stats)
- self._stats_has_moments = ((sign[2] is not None) or
- ('moments' in sign[0]))
- self._random_state = check_random_state(seed)
- @property
- def random_state(self):
- """ Get or set the RandomState object for generating random variates.
- This can be None, an int, or an existing RandomState object.
- If None (or np.random), use the RandomState singleton used by np.random.
- If already a RandomState instance, use it.
- If an int, use a new RandomState instance seeded with seed.
- """
- return self._random_state
- @random_state.setter
- def random_state(self, seed):
- self._random_state = check_random_state(seed)
- def __getstate__(self):
- return self._updated_ctor_param(), self._random_state
- def __setstate__(self, state):
- ctor_param, r = state
- self.__init__(**ctor_param)
- self._random_state = r
- return self
- def _construct_argparser(
- self, meths_to_inspect, locscale_in, locscale_out):
- """Construct the parser for the shape arguments.
- Generates the argument-parsing functions dynamically and attaches
- them to the instance.
- It is supposed to be called from the __init__ method of each distribution class.
- If self.shapes is a non-empty string, interprets it as a
- comma-separated list of shape parameters.
- Otherwise inspects the call signatures of `meths_to_inspect`
- and constructs the argument-parsing functions from these.
- In this case also sets `shapes` and `numargs`.
- """
- if self.shapes:
- # sanitize the user-supplied shapes
- if not isinstance(self.shapes, string_types):
- raise TypeError('shapes must be a string.')
- shapes = self.shapes.replace(',', ' ').split()
- for field in shapes:
- if keyword.iskeyword(field):
- raise SyntaxError('keywords cannot be used as shapes.')
- if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
- raise SyntaxError(
- 'shapes must be valid python identifiers')
- else:
- # find out the call signatures (_pdf, _cdf etc), deduce shape
- # arguments. Generic methods only have 'self, x', any further args
- # are shapes.
- shapes_list = []
- for meth in meths_to_inspect:
- shapes_args = _getargspec(meth) # NB: does not contain self
- args = shapes_args.args[1:] # peel off 'x', too
- if args:
- shapes_list.append(args)
- # *args or **kwargs are not allowed w/automatic shapes
- if shapes_args.varargs is not None:
- raise TypeError(
- '*args are not allowed w/out explicit shapes')
- if shapes_args.keywords is not None:
- raise TypeError(
- '**kwds are not allowed w/out explicit shapes')
- if shapes_args.defaults is not None:
- raise TypeError('defaults are not allowed for shapes')
- if shapes_list:
- shapes = shapes_list[0]
- # make sure the signatures are consistent
- for item in shapes_list:
- if item != shapes:
- raise TypeError('Shape arguments are inconsistent.')
- else:
- shapes = []
- # have the arguments, construct the method from template
- shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
- dct = dict(shape_arg_str=shapes_str,
- locscale_in=locscale_in,
- locscale_out=locscale_out,
- )
- ns = {}
- exec_(parse_arg_template % dct, ns)
- # NB: attach to the instance, not class
- for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
- setattr(self, name,
- instancemethod(ns[name], self, self.__class__)
- )
- self.shapes = ', '.join(shapes) if shapes else None
- if not hasattr(self, 'numargs'):
- # allows more general subclassing with *args
- self.numargs = len(shapes)
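- # Illustration (a sketch, not library code): a subclass that defines only
- #     def _pdf(self, x, df): ...
- # ends up with shapes='df' and numargs=1 deduced here, and the generated
- # _parse_args accepts (df, loc=0, scale=1).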
- def _construct_doc(self, docdict, shapes_vals=None):
- """Construct the instance docstring with string substitutions."""
- tempdict = docdict.copy()
- tempdict['name'] = self.name or 'distname'
- tempdict['shapes'] = self.shapes or ''
- if shapes_vals is None:
- shapes_vals = ()
- vals = ', '.join('%.3g' % val for val in shapes_vals)
- tempdict['vals'] = vals
- tempdict['shapes_'] = self.shapes or ''
- if self.shapes and self.numargs == 1:
- tempdict['shapes_'] += ','
- if self.shapes:
- tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
- else:
- tempdict['set_vals_stmt'] = ''
- if self.shapes is None:
- # remove shapes from call parameters if there are none
- for item in ['default', 'before_notes']:
- tempdict[item] = tempdict[item].replace(
- "\n%(shapes)s : array_like\n shape parameters", "")
- for i in range(2):
- if self.shapes is None:
- # necessary because we use %(shapes)s in two forms (with and without ", ")
- self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
- self.__doc__ = doccer.docformat(self.__doc__, tempdict)
- # correct for empty shapes
- self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
- def _construct_default_doc(self, longname=None, extradoc=None,
- docdict=None, discrete='continuous'):
- """Construct instance docstring from the default template."""
- if longname is None:
- longname = 'A'
- if extradoc is None:
- extradoc = ''
- if extradoc.startswith('\n\n'):
- extradoc = extradoc[2:]
- self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
- '\n\n%(before_notes)s\n', docheaders['notes'],
- extradoc, '\n%(example)s'])
- self._construct_doc(docdict)
- def freeze(self, *args, **kwds):
- """Freeze the distribution for the given arguments.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution. Should include all
- the non-optional arguments, may include ``loc`` and ``scale``.
- Returns
- -------
- rv_frozen : rv_frozen instance
- The frozen distribution.
- """
- return rv_frozen(self, *args, **kwds)
- def __call__(self, *args, **kwds):
- return self.freeze(*args, **kwds)
- __call__.__doc__ = freeze.__doc__
- # The actual calculation functions (no basic checking need be done)
- # If these are defined, the others won't be looked at.
- # Otherwise, the other set can be defined.
- def _stats(self, *args, **kwds):
- return None, None, None, None
- # Noncentral moments (moments about zero)
- def _munp(self, n, *args):
- # Silence floating point warnings from integration.
- olderr = np.seterr(all='ignore')
- vals = self.generic_moment(n, *args)
- np.seterr(**olderr)
- return vals
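- # `generic_moment` is attached in the subclass __init__ according to
- # `momtype` (see rv_continuous.__init__): momtype=0 selects the pdf-based
- # helper _mom0_sc, momtype=1 (the default) the ppf-based _mom1_sc; both
- # evaluate the moment by numerical integration.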
- def _argcheck_rvs(self, *args, **kwargs):
- # Handle broadcasting and size validation of the rvs method.
- # Subclasses should not have to override this method.
- # The rule is that if `size` is not None, then `size` gives the
- # shape of the result (integer values of `size` are treated as
- # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
- #
- # `args` is expected to contain the shape parameters (if any), the
- # location and the scale in a flat tuple (e.g. if there are two
- # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
- # The only keyword argument expected is 'size'.
- size = kwargs.get('size', None)
- all_bcast = np.broadcast_arrays(*args)
- def squeeze_left(a):
- while a.ndim > 0 and a.shape[0] == 1:
- a = a[0]
- return a
- # Eliminate trivial leading dimensions. In the convention
- # used by numpy's random variate generators, trivial leading
- # dimensions are effectively ignored. In other words, when `size`
- # is given, trivial leading dimensions of the broadcast parameters
- # in excess of the number of dimensions in size are ignored, e.g.
- # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
- # array([ 1.00104267, 3.00422496, 4.99799278])
- # If `size` is not given, the exact broadcast shape is preserved:
- # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
- # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
- #
- all_bcast = [squeeze_left(a) for a in all_bcast]
- bcast_shape = all_bcast[0].shape
- bcast_ndim = all_bcast[0].ndim
- if size is None:
- size_ = bcast_shape
- else:
- size_ = tuple(np.atleast_1d(size))
- # Check compatibility of size_ with the broadcast shape of all
- # the parameters. This check is intended to be consistent with
- # how the numpy random variate generators (e.g. np.random.normal,
- # np.random.beta) handle their arguments. The rule is that, if size
- # is given, it determines the shape of the output. Broadcasting
- # can't change the output size.
- # This is the standard broadcasting convention of extending the
- # shape with fewer dimensions with enough dimensions of length 1
- # so that the two shapes have the same number of dimensions.
- ndiff = bcast_ndim - len(size_)
- if ndiff < 0:
- bcast_shape = (1,)*(-ndiff) + bcast_shape
- elif ndiff > 0:
- size_ = (1,)*ndiff + size_
- # This compatibility test is not standard. In "regular" broadcasting,
- # two shapes are compatible if for each dimension, the lengths are the
- # same or one of the lengths is 1. Here, each dimension of the broadcast
- # parameter shape must either be 1 or equal the corresponding dimension of size_.
- ok = all([bcdim == 1 or bcdim == szdim
- for (bcdim, szdim) in zip(bcast_shape, size_)])
- if not ok:
- raise ValueError("size does not match the broadcast shape of "
- "the parameters.")
- param_bcast = all_bcast[:-2]
- loc_bcast = all_bcast[-2]
- scale_bcast = all_bcast[-1]
- return param_bcast, loc_bcast, scale_bcast, size_
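- # Worked example of the rule above (a sketch): with loc of shape (3,), a
- # scalar scale and no shape parameters, size=(2, 3) is accepted (the (3,)
- # broadcasts against the trailing dimension), while size=(3, 2) raises
- # ValueError, because broadcasting may not change the requested output size.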
- ## These are the methods you must define (standard form functions)
- ## NB: generic _pdf, _logpdf, _cdf are different for
- ## rv_continuous and rv_discrete hence are defined in there
- def _argcheck(self, *args):
- """Default check for correct values on args and keywords.
- Returns condition array of 1's where arguments are correct and
- 0's where they are not.
- """
- cond = 1
- for arg in args:
- cond = logical_and(cond, (asarray(arg) > 0))
- return cond
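- # Sketch of a typical override (not library code): a distribution whose
- # single shape parameter p must lie strictly between 0 and 1 would define
- #     def _argcheck(self, p):
- #         return (p > 0) & (p < 1)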
- def _support_mask(self, x):
- return (self.a <= x) & (x <= self.b)
- def _open_support_mask(self, x):
- return (self.a < x) & (x < self.b)
- def _rvs(self, *args):
- # This method must handle self._size being a tuple, and it must
- # properly broadcast *args and self._size. self._size might be
- # an empty tuple, which means a scalar random variate is to be
- # generated.
- ## Use basic inverse cdf algorithm for RV generation as default.
- U = self._random_state.random_sample(self._size)
- Y = self._ppf(U, *args)
- return Y
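- # This default is inversion sampling: if U is uniform on (0, 1), then
- # ppf(U) is distributed according to the distribution's cdf, so any
- # distribution with a working _ppf (even the slow generic one) gets
- # random variates for free.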
- def _logcdf(self, x, *args):
- return log(self._cdf(x, *args))
- def _sf(self, x, *args):
- return 1.0-self._cdf(x, *args)
- def _logsf(self, x, *args):
- return log(self._sf(x, *args))
- def _ppf(self, q, *args):
- return self._ppfvec(q, *args)
- def _isf(self, q, *args):
- return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
- # These are actually called, and should not be overwritten if you
- # want to keep error checking.
- def rvs(self, *args, **kwds):
- """
- Random variates of given type.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- scale : array_like, optional
- Scale parameter (default=1).
- size : int or tuple of ints, optional
- Number of random variates to generate (default is 1).
- random_state : None or int or ``np.random.RandomState`` instance, optional
- If int or RandomState, use it for drawing the random variates.
- If None, rely on ``self.random_state``.
- Default is None.
- Returns
- -------
- rvs : ndarray or scalar
- Random variates of given `size`.
- """
- discrete = kwds.pop('discrete', None)
- rndm = kwds.pop('random_state', None)
- args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
- cond = logical_and(self._argcheck(*args), (scale >= 0))
- if not np.all(cond):
- raise ValueError("Domain error in arguments.")
- if np.all(scale == 0):
- return loc*ones(size, 'd')
- # extra gymnastics needed for a custom random_state
- if rndm is not None:
- random_state_saved = self._random_state
- self._random_state = check_random_state(rndm)
- # `size` should just be an argument to _rvs(), but for
- # historical reasons, it is made an attribute that is read
- # by _rvs().
- self._size = size
- vals = self._rvs(*args)
- vals = vals * scale + loc
- # do not forget to restore the _random_state
- if rndm is not None:
- self._random_state = random_state_saved
- # Cast to int if discrete
- if discrete:
- if size == ():
- vals = int(vals)
- else:
- vals = vals.astype(int)
- return vals
- def stats(self, *args, **kwds):
- """
- Some statistics of the given RV.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional (continuous RVs only)
- scale parameter (default=1)
- moments : str, optional
- composed of letters ['mvsk'] defining which moments to compute:
- 'm' = mean,
- 'v' = variance,
- 's' = (Fisher's) skew,
- 'k' = (Fisher's) kurtosis.
- (default is 'mv')
- Returns
- -------
- stats : sequence
- of requested moments.
- """
- args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
- # scale = 1 by construction for discrete RVs
- loc, scale = map(asarray, (loc, scale))
- args = tuple(map(asarray, args))
- cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
- output = []
- default = valarray(shape(cond), self.badvalue)
- # Use only entries that are valid in calculation
- if np.any(cond):
- goodargs = argsreduce(cond, *(args+(scale, loc)))
- scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
- if self._stats_has_moments:
- mu, mu2, g1, g2 = self._stats(*goodargs,
- **{'moments': moments})
- else:
- mu, mu2, g1, g2 = self._stats(*goodargs)
- if g1 is None:
- mu3 = None
- else:
- if mu2 is None:
- mu2 = self._munp(2, *goodargs)
- if g2 is None:
- # (mu2**1.5) breaks down for nan and inf
- mu3 = g1 * np.power(mu2, 1.5)
- if 'm' in moments:
- if mu is None:
- mu = self._munp(1, *goodargs)
- out0 = default.copy()
- place(out0, cond, mu * scale + loc)
- output.append(out0)
- if 'v' in moments:
- if mu2 is None:
- mu2p = self._munp(2, *goodargs)
- if mu is None:
- mu = self._munp(1, *goodargs)
- mu2 = mu2p - mu * mu
- if np.isinf(mu):
- # if mean is inf then var is also inf
- mu2 = np.inf
- out0 = default.copy()
- place(out0, cond, mu2 * scale * scale)
- output.append(out0)
- if 's' in moments:
- if g1 is None:
- mu3p = self._munp(3, *goodargs)
- if mu is None:
- mu = self._munp(1, *goodargs)
- if mu2 is None:
- mu2p = self._munp(2, *goodargs)
- mu2 = mu2p - mu * mu
- with np.errstate(invalid='ignore'):
- mu3 = mu3p - 3 * mu * mu2 - mu**3
- g1 = mu3 / np.power(mu2, 1.5)
- out0 = default.copy()
- place(out0, cond, g1)
- output.append(out0)
- if 'k' in moments:
- if g2 is None:
- mu4p = self._munp(4, *goodargs)
- if mu is None:
- mu = self._munp(1, *goodargs)
- if mu2 is None:
- mu2p = self._munp(2, *goodargs)
- mu2 = mu2p - mu * mu
- if mu3 is None:
- mu3p = self._munp(3, *goodargs)
- with np.errstate(invalid='ignore'):
- mu3 = mu3p - 3 * mu * mu2 - mu**3
- with np.errstate(invalid='ignore'):
- mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
- g2 = mu4 / mu2**2.0 - 3.0
- out0 = default.copy()
- place(out0, cond, g2)
- output.append(out0)
- else: # no valid args
- output = []
- for _ in moments:
- out0 = default.copy()
- output.append(out0)
- if len(output) == 1:
- return output[0]
- else:
- return tuple(output)
- def entropy(self, *args, **kwds):
- """
- Differential entropy of the RV.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- scale : array_like, optional (continuous distributions only).
- Scale parameter (default=1).
- Notes
- -----
- Entropy is defined base `e`:
- >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
- >>> np.allclose(drv.entropy(), np.log(2.0))
- True
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- # NB: for discrete distributions scale=1 by construction in _parse_args
- loc, scale = map(asarray, (loc, scale))
- args = tuple(map(asarray, args))
- cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
- output = zeros(shape(cond0), 'd')
- place(output, (1-cond0), self.badvalue)
- goodargs = argsreduce(cond0, scale, *args)
- goodscale = goodargs[0]
- goodargs = goodargs[1:]
- place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
- return output
- def moment(self, n, *args, **kwds):
- """
- n-th order non-central moment of distribution.
- Parameters
- ----------
- n : int, n >= 1
- Order of moment.
- arg1, arg2, arg3,... : float
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- if not (self._argcheck(*args) and (scale > 0)):
- return nan
- if (floor(n) != n):
- raise ValueError("Moment must be an integer.")
- if (n < 0):
- raise ValueError("Moment must be positive.")
- mu, mu2, g1, g2 = None, None, None, None
- if (n > 0) and (n < 5):
- if self._stats_has_moments:
- mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
- else:
- mdict = {}
- mu, mu2, g1, g2 = self._stats(*args, **mdict)
- val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
- # Convert to transformed X = L + S*Y
- # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
- if loc == 0:
- return scale**n * val
- else:
- result = 0
- fac = float(scale) / float(loc)
- for k in range(n):
- valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
- result += comb(n, k, exact=True)*(fac**k) * valk
- result += fac**n * val
- return result * loc**n
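- # Worked check of the expansion above (a sketch): for n=1 and loc != 0,
- # result = 1 + (scale/loc)*E[Y], so result*loc = loc + scale*E[Y], i.e.
- # moment(1) reduces to the mean of the shifted and scaled variable.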
- def median(self, *args, **kwds):
- """
- Median of the distribution.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- Location parameter, Default is 0.
- scale : array_like, optional
- Scale parameter, Default is 1.
- Returns
- -------
- median : float
- The median of the distribution.
- See Also
- --------
- stats.distributions.rv_discrete.ppf
- Inverse of the CDF
- """
- return self.ppf(0.5, *args, **kwds)
- def mean(self, *args, **kwds):
- """
- Mean of the distribution.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- mean : float
- the mean of the distribution
- """
- kwds['moments'] = 'm'
- res = self.stats(*args, **kwds)
- if isinstance(res, ndarray) and res.ndim == 0:
- return res[()]
- return res
- def var(self, *args, **kwds):
- """
- Variance of the distribution.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- var : float
- the variance of the distribution
- """
- kwds['moments'] = 'v'
- res = self.stats(*args, **kwds)
- if isinstance(res, ndarray) and res.ndim == 0:
- return res[()]
- return res
- def std(self, *args, **kwds):
- """
- Standard deviation of the distribution.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- std : float
- standard deviation of the distribution
- """
- kwds['moments'] = 'v'
- res = sqrt(self.stats(*args, **kwds))
- return res
- def interval(self, alpha, *args, **kwds):
- """
- Confidence interval with equal areas around the median.
- Parameters
- ----------
- alpha : array_like of float
- Probability that an rv will be drawn from the returned range.
- Each value should be in the range [0, 1].
- arg1, arg2, ... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- location parameter, Default is 0.
- scale : array_like, optional
- scale parameter, Default is 1.
- Returns
- -------
- a, b : ndarray of float
- end-points of range that contain ``100 * alpha %`` of the rv's
- possible values.
- """
- alpha = asarray(alpha)
- if np.any((alpha > 1) | (alpha < 0)):
- raise ValueError("alpha must be between 0 and 1 inclusive")
- q1 = (1.0-alpha)/2
- q2 = (1.0+alpha)/2
- a = self.ppf(q1, *args, **kwds)
- b = self.ppf(q2, *args, **kwds)
- return a, b
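- # Example of the quantile split above (a sketch): alpha=0.95 gives
- # q1=0.025 and q2=0.975, i.e. the central 95% interval
- # (ppf(0.025), ppf(0.975)).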
- ## continuous random variables: implement maybe later
- ##
- ## hf --- Hazard Function (PDF / SF)
- ## chf --- Cumulative hazard function (-log(SF))
- ## psf --- Probability sparsity function (reciprocal of the pdf) in
- ## units of percent-point-function (as a function of q).
- ## Also, the derivative of the percent-point function.
- class rv_continuous(rv_generic):
- """
- A generic continuous random variable class meant for subclassing.
- `rv_continuous` is a base class to construct specific distribution classes
- and instances for continuous random variables. It cannot be used
- directly as a distribution.
- Parameters
- ----------
- momtype : int, optional
- The type of generic moment calculation to use: 0 for pdf, 1 (default)
- for ppf.
- a : float, optional
- Lower bound of the support of the distribution, default is minus
- infinity.
- b : float, optional
- Upper bound of the support of the distribution, default is plus
- infinity.
- xtol : float, optional
- The tolerance for fixed point calculation for generic ppf.
- badvalue : float, optional
- The value in result arrays that indicates a value for which
- some argument restriction is violated; default is np.nan.
- name : str, optional
- The name of the instance. This string is used to construct the default
- example for distributions.
- longname : str, optional
- This string is used as part of the first line of the docstring returned
- when a subclass has no docstring of its own. Note: `longname` exists
- for backwards compatibility, do not use for new subclasses.
- shapes : str, optional
- The shape of the distribution. For example ``"m, n"`` for a
- distribution that takes two integers as the two shape arguments for all
- its methods. If not provided, shape parameters will be inferred from
- the signature of the private methods, ``_pdf`` and ``_cdf`` of the
- instance.
- extradoc : str, optional, deprecated
- This string is used as the last part of the docstring returned when a
- subclass has no docstring of its own. Note: `extradoc` exists for
- backwards compatibility, do not use for new subclasses.
- seed : None or int or ``numpy.random.RandomState`` instance, optional
- This parameter defines the RandomState object to use for drawing
- random variates.
- If None (or np.random), the global np.random state is used.
- If integer, it is used to seed the local RandomState instance.
- Default is None.
- Methods
- -------
- rvs
- pdf
- logpdf
- cdf
- logcdf
- sf
- logsf
- ppf
- isf
- moment
- stats
- entropy
- expect
- median
- mean
- std
- var
- interval
- __call__
- fit
- fit_loc_scale
- nnlf
- Notes
- -----
- Public methods of an instance of a distribution class (e.g., ``pdf``,
- ``cdf``) check their arguments and pass valid arguments to private,
- computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
- if it is within the support of a distribution, ``self.a <= x <= self.b``.
- Whether a shape parameter is valid is decided by an ``_argcheck`` method
- (which defaults to checking that its arguments are strictly positive.)
- **Subclassing**
- New random variables can be defined by subclassing the `rv_continuous` class
- and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
- to location 0 and scale 1).
- If positive argument checking is not correct for your RV
- then you will also need to re-define the ``_argcheck`` method.
- Correct, but potentially slow, defaults exist for the remaining
- methods; for speed and/or accuracy you can override::
- _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
- The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
- applied to a uniform random variate. In order to generate random variates
- efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
- if the inverse cdf can be expressed in an explicit form) or a sampling
- method needs to be implemented in a custom ``_rvs`` method.
- If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
- The main reason would be to improve numerical accuracy: for example,
- the survival function ``_sf`` is computed as ``1 - _cdf`` which can
- result in loss of precision if ``_cdf(x)`` is close to one.
- **Methods that can be overwritten by subclasses**
- ::
- _rvs
- _pdf
- _cdf
- _sf
- _ppf
- _isf
- _stats
- _munp
- _entropy
- _argcheck
- There are additional (internal and private) generic methods that can
- be useful for cross-checking and for debugging, but might not work in all
- cases when called directly.
- A note on ``shapes``: subclasses need not specify them explicitly. In this
- case, `shapes` will be automatically deduced from the signatures of the
- overridden methods (`pdf`, `cdf` etc).
- If, for some reason, you prefer to avoid relying on introspection, you can
- specify ``shapes`` explicitly as an argument to the instance constructor.
- **Frozen Distributions**
- Normally, you must provide shape parameters (and, optionally, location and
- scale parameters) to each call of a method of a distribution.
- Alternatively, the object may be called (as a function) to fix the shape,
- location, and scale parameters returning a "frozen" continuous RV object:
- rv = generic(<shape(s)>, loc=0, scale=1)
- `rv_frozen` object with the same methods but holding the given shape,
- location, and scale fixed
- **Statistics**
- Statistics are computed using numerical integration by default.
- For speed you can redefine this using ``_stats``:
- - take shape parameters and return mu, mu2, g1, g2
- - If you can't compute one of these, return it as None
- - Can also be defined with a keyword argument ``moments``, which is a
- string composed of "m", "v", "s", and/or "k".
- Only the components appearing in the string should be computed and
- returned in the order "m", "v", "s", or "k" with missing values
- returned as None.
- Alternatively, you can override ``_munp``, which takes ``n`` and shape
- parameters and returns the n-th non-central moment of the distribution.
- Examples
- --------
- To create a new Gaussian distribution, we would do the following:
- >>> from scipy.stats import rv_continuous
- >>> class gaussian_gen(rv_continuous):
- ... "Gaussian distribution"
- ... def _pdf(self, x):
- ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
- >>> gaussian = gaussian_gen(name='gaussian')
- ``scipy.stats`` distributions are *instances*, so here we subclass
- `rv_continuous` and create an instance. With this, we now have
- a fully functional distribution with all relevant methods automagically
- generated by the framework.
- Note that above we defined a standard normal distribution, with zero mean
- and unit variance. Shifting and scaling of the distribution can be done
- by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
- essentially computes ``y = (x - loc) / scale`` and
- ``gaussian._pdf(y) / scale``.
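- As a quick check of this relationship:
- >>> gaussian.pdf(1.0, loc=1.0, scale=2.0) == gaussian.pdf(0.0) / 2.0
- True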
- """
- def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
- badvalue=None, name=None, longname=None,
- shapes=None, extradoc=None, seed=None):
- super(rv_continuous, self).__init__(seed)
- # save the ctor parameters, cf generic freeze
- self._ctor_param = dict(
- momtype=momtype, a=a, b=b, xtol=xtol,
- badvalue=badvalue, name=name, longname=longname,
- shapes=shapes, extradoc=extradoc, seed=seed)
- if badvalue is None:
- badvalue = nan
- if name is None:
- name = 'Distribution'
- self.badvalue = badvalue
- self.name = name
- self.a = a
- self.b = b
- if a is None:
- self.a = -inf
- if b is None:
- self.b = inf
- self.xtol = xtol
- self.moment_type = momtype
- self.shapes = shapes
- self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
- locscale_in='loc=0, scale=1',
- locscale_out='loc, scale')
- # nin correction
- self._ppfvec = vectorize(self._ppf_single, otypes='d')
- self._ppfvec.nin = self.numargs + 1
- self.vecentropy = vectorize(self._entropy, otypes='d')
- self._cdfvec = vectorize(self._cdf_single, otypes='d')
- self._cdfvec.nin = self.numargs + 1
- self.extradoc = extradoc
- if momtype == 0:
- self.generic_moment = vectorize(self._mom0_sc, otypes='d')
- else:
- self.generic_moment = vectorize(self._mom1_sc, otypes='d')
- # Because of the *args argument of _mom0_sc, vectorize cannot count the
- # number of arguments correctly.
- self.generic_moment.nin = self.numargs + 1
- if longname is None:
- if name[0] in 'aeiouAEIOU':
- hstr = "An "
- else:
- hstr = "A "
- longname = hstr + name
- if sys.flags.optimize < 2:
- # Skip adding docstrings if interpreter is run with -OO
- if self.__doc__ is None:
- self._construct_default_doc(longname=longname,
- extradoc=extradoc,
- docdict=docdict,
- discrete='continuous')
- else:
- dct = dict(distcont)
- self._construct_doc(docdict, dct.get(self.name))
- def _updated_ctor_param(self):
- """ Return the current version of _ctor_param, possibly updated by user.
- Used by freezing and pickling.
- Keep this in sync with the signature of __init__.
- """
- dct = self._ctor_param.copy()
- dct['a'] = self.a
- dct['b'] = self.b
- dct['xtol'] = self.xtol
- dct['badvalue'] = self.badvalue
- dct['name'] = self.name
- dct['shapes'] = self.shapes
- dct['extradoc'] = self.extradoc
- return dct
- def _ppf_to_solve(self, x, q, *args):
- return self.cdf(*(x, )+args)-q
- def _ppf_single(self, q, *args):
- left = right = None
- if self.a > -np.inf:
- left = self.a
- if self.b < np.inf:
- right = self.b
- factor = 10.
- if left is None:  # i.e. self.a = -inf
- left = -1.*factor
- while self._ppf_to_solve(left, q, *args) > 0.:
- right = left
- left *= factor
- # left is now such that cdf(left) < q
- if right is None:  # i.e. self.b = inf
- right = factor
- while self._ppf_to_solve(right, q, *args) < 0.:
- left = right
- right *= factor
- # right is now such that cdf(right) > q
- return optimize.brentq(self._ppf_to_solve,
- left, right, args=(q,)+args, xtol=self.xtol)
- # moment from definition
- def _mom_integ0(self, x, m, *args):
- return x**m * self.pdf(x, *args)
- def _mom0_sc(self, m, *args):
- return integrate.quad(self._mom_integ0, self.a, self.b,
- args=(m,)+args)[0]
- # moment calculated using ppf
- def _mom_integ1(self, q, m, *args):
- return (self.ppf(q, *args))**m
- def _mom1_sc(self, m, *args):
- return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
- def _pdf(self, x, *args):
- return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
- ## Could also define any of these
- def _logpdf(self, x, *args):
- return log(self._pdf(x, *args))
- def _cdf_single(self, x, *args):
- return integrate.quad(self._pdf, self.a, x, args=args)[0]
- def _cdf(self, x, *args):
- return self._cdfvec(x, *args)
- ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
- ## in rv_generic
- def pdf(self, x, *args, **kwds):
- """
- Probability density function at x of the given RV.
- Parameters
- ----------
- x : array_like
- quantiles
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- pdf : ndarray
- Probability density function evaluated at x
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- x, loc, scale = map(asarray, (x, loc, scale))
- args = tuple(map(asarray, args))
- dtyp = np.find_common_type([x.dtype, np.float64], [])
- x = np.asarray((x - loc)/scale, dtype=dtyp)
- cond0 = self._argcheck(*args) & (scale > 0)
- cond1 = self._support_mask(x) & (scale > 0)
- cond = cond0 & cond1
- output = zeros(shape(cond), dtyp)
- putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
- if np.any(cond):
- goodargs = argsreduce(cond, *((x,)+args+(scale,)))
- scale, goodargs = goodargs[-1], goodargs[:-1]
- place(output, cond, self._pdf(*goodargs) / scale)
- if output.ndim == 0:
- return output[()]
- return output
- def logpdf(self, x, *args, **kwds):
- """
- Log of the probability density function at x of the given RV.
- This uses a more numerically accurate calculation if available.
- Parameters
- ----------
- x : array_like
- quantiles
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- logpdf : array_like
- Log of the probability density function evaluated at x
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- x, loc, scale = map(asarray, (x, loc, scale))
- args = tuple(map(asarray, args))
- dtyp = np.find_common_type([x.dtype, np.float64], [])
- x = np.asarray((x - loc)/scale, dtype=dtyp)
- cond0 = self._argcheck(*args) & (scale > 0)
- cond1 = self._support_mask(x) & (scale > 0)
- cond = cond0 & cond1
- output = empty(shape(cond), dtyp)
- output.fill(NINF)
- putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
- if np.any(cond):
- goodargs = argsreduce(cond, *((x,)+args+(scale,)))
- scale, goodargs = goodargs[-1], goodargs[:-1]
- place(output, cond, self._logpdf(*goodargs) - log(scale))
- if output.ndim == 0:
- return output[()]
- return output
- def cdf(self, x, *args, **kwds):
- """
- Cumulative distribution function of the given RV.
- Parameters
- ----------
- x : array_like
- quantiles
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- cdf : ndarray
- Cumulative distribution function evaluated at `x`
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- x, loc, scale = map(asarray, (x, loc, scale))
- args = tuple(map(asarray, args))
- dtyp = np.find_common_type([x.dtype, np.float64], [])
- x = np.asarray((x - loc)/scale, dtype=dtyp)
- cond0 = self._argcheck(*args) & (scale > 0)
- cond1 = self._open_support_mask(x) & (scale > 0)
- cond2 = (x >= self.b) & cond0
- cond = cond0 & cond1
- output = zeros(shape(cond), dtyp)
- place(output, (1-cond0)+np.isnan(x), self.badvalue)
- place(output, cond2, 1.0)
- if np.any(cond): # call only if at least 1 entry
- goodargs = argsreduce(cond, *((x,)+args))
- place(output, cond, self._cdf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def logcdf(self, x, *args, **kwds):
- """
- Log of the cumulative distribution function at x of the given RV.
- Parameters
- ----------
- x : array_like
- quantiles
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- logcdf : array_like
- Log of the cumulative distribution function evaluated at x
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- x, loc, scale = map(asarray, (x, loc, scale))
- args = tuple(map(asarray, args))
- dtyp = np.find_common_type([x.dtype, np.float64], [])
- x = np.asarray((x - loc)/scale, dtype=dtyp)
- cond0 = self._argcheck(*args) & (scale > 0)
- cond1 = self._open_support_mask(x) & (scale > 0)
- cond2 = (x >= self.b) & cond0
- cond = cond0 & cond1
- output = empty(shape(cond), dtyp)
- output.fill(NINF)
- place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
- place(output, cond2, 0.0)
- if np.any(cond): # call only if at least 1 entry
- goodargs = argsreduce(cond, *((x,)+args))
- place(output, cond, self._logcdf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def sf(self, x, *args, **kwds):
- """
- Survival function (1 - `cdf`) at x of the given RV.
- Parameters
- ----------
- x : array_like
- quantiles
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- sf : array_like
- Survival function evaluated at x
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- x, loc, scale = map(asarray, (x, loc, scale))
- args = tuple(map(asarray, args))
- dtyp = np.find_common_type([x.dtype, np.float64], [])
- x = np.asarray((x - loc)/scale, dtype=dtyp)
- cond0 = self._argcheck(*args) & (scale > 0)
- cond1 = self._open_support_mask(x) & (scale > 0)
- cond2 = cond0 & (x <= self.a)
- cond = cond0 & cond1
- output = zeros(shape(cond), dtyp)
- place(output, (1-cond0)+np.isnan(x), self.badvalue)
- place(output, cond2, 1.0)
- if np.any(cond):
- goodargs = argsreduce(cond, *((x,)+args))
- place(output, cond, self._sf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def logsf(self, x, *args, **kwds):
- """
- Log of the survival function of the given RV.
- Returns the log of the "survival function," defined as (1 - `cdf`),
- evaluated at `x`.
- Parameters
- ----------
- x : array_like
- quantiles
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- logsf : ndarray
- Log of the survival function evaluated at `x`.
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- x, loc, scale = map(asarray, (x, loc, scale))
- args = tuple(map(asarray, args))
- dtyp = np.find_common_type([x.dtype, np.float64], [])
- x = np.asarray((x - loc)/scale, dtype=dtyp)
- cond0 = self._argcheck(*args) & (scale > 0)
- cond1 = self._open_support_mask(x) & (scale > 0)
- cond2 = cond0 & (x <= self.a)
- cond = cond0 & cond1
- output = empty(shape(cond), dtyp)
- output.fill(NINF)
- place(output, (1-cond0)+np.isnan(x), self.badvalue)
- place(output, cond2, 0.0)
- if np.any(cond):
- goodargs = argsreduce(cond, *((x,)+args))
- place(output, cond, self._logsf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def ppf(self, q, *args, **kwds):
- """
- Percent point function (inverse of `cdf`) at q of the given RV.
- Parameters
- ----------
- q : array_like
- lower tail probability
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- x : array_like
- quantile corresponding to the lower tail probability q.
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- q, loc, scale = map(asarray, (q, loc, scale))
- args = tuple(map(asarray, args))
- cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
- cond1 = (0 < q) & (q < 1)
- cond2 = cond0 & (q == 0)
- cond3 = cond0 & (q == 1)
- cond = cond0 & cond1
- output = valarray(shape(cond), value=self.badvalue)
- lower_bound = self.a * scale + loc
- upper_bound = self.b * scale + loc
- place(output, cond2, argsreduce(cond2, lower_bound)[0])
- place(output, cond3, argsreduce(cond3, upper_bound)[0])
- if np.any(cond): # call only if at least 1 entry
- goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
- scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
- place(output, cond, self._ppf(*goodargs) * scale + loc)
- if output.ndim == 0:
- return output[()]
- return output
- def isf(self, q, *args, **kwds):
- """
- Inverse survival function (inverse of `sf`) at q of the given RV.
- Parameters
- ----------
- q : array_like
- upper tail probability
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- location parameter (default=0)
- scale : array_like, optional
- scale parameter (default=1)
- Returns
- -------
- x : ndarray or scalar
- Quantile corresponding to the upper tail probability q.
- """
- args, loc, scale = self._parse_args(*args, **kwds)
- q, loc, scale = map(asarray, (q, loc, scale))
- args = tuple(map(asarray, args))
- cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
- cond1 = (0 < q) & (q < 1)
- cond2 = cond0 & (q == 1)
- cond3 = cond0 & (q == 0)
- cond = cond0 & cond1
- output = valarray(shape(cond), value=self.badvalue)
- lower_bound = self.a * scale + loc
- upper_bound = self.b * scale + loc
- place(output, cond2, argsreduce(cond2, lower_bound)[0])
- place(output, cond3, argsreduce(cond3, upper_bound)[0])
- if np.any(cond):
- goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
- scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
- place(output, cond, self._isf(*goodargs) * scale + loc)
- if output.ndim == 0:
- return output[()]
- return output
- def _nnlf(self, x, *args):
- return -np.sum(self._logpdf(x, *args), axis=0)
- def _unpack_loc_scale(self, theta):
- try:
- loc = theta[-2]
- scale = theta[-1]
- args = tuple(theta[:-2])
- except IndexError:
- raise ValueError("Not enough input arguments.")
- return loc, scale, args
- def nnlf(self, theta, x):
- '''Return negative loglikelihood function.
- Notes
- -----
- This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
- parameters (including loc and scale).
- '''
- loc, scale, args = self._unpack_loc_scale(theta)
- if not self._argcheck(*args) or scale <= 0:
- return inf
- x = asarray((x-loc) / scale)
- n_log_scale = len(x) * log(scale)
- if np.any(~self._support_mask(x)):
- return inf
- return self._nnlf(x, *args) + n_log_scale
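- # Usage sketch (hypothetical values): for stats.gamma, whose single shape
- # parameter is `a`, `theta` is packed as (a, loc, scale), so
- #     gamma.nnlf((2.0, 0.0, 1.0), data)
- # equals -sum(gamma.logpdf(data, 2.0, loc=0.0, scale=1.0)).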
- def _nnlf_and_penalty(self, x, args):
- cond0 = ~self._support_mask(x)
- n_bad = np.count_nonzero(cond0, axis=0)
- if n_bad > 0:
- x = argsreduce(~cond0, x)[0]
- logpdf = self._logpdf(x, *args)
- finite_logpdf = np.isfinite(logpdf)
- n_bad += np.sum(~finite_logpdf, axis=0)
- if n_bad > 0:
- penalty = n_bad * log(_XMAX) * 100
- return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
- return -np.sum(logpdf, axis=0)
- def _penalized_nnlf(self, theta, x):
- '''Return penalized negative loglikelihood function,
- i.e., ``-sum(log pdf(x, theta), axis=0) + penalty``,
- where `theta` are the parameters (including loc and scale).
- '''
- loc, scale, args = self._unpack_loc_scale(theta)
- if not self._argcheck(*args) or scale <= 0:
- return inf
- x = asarray((x-loc) / scale)
- n_log_scale = len(x) * log(scale)
- return self._nnlf_and_penalty(x, args) + n_log_scale
- # return starting point for fit (shape arguments + loc + scale)
- def _fitstart(self, data, args=None):
- if args is None:
- args = (1.0,)*self.numargs
- loc, scale = self._fit_loc_scale_support(data, *args)
- return args + (loc, scale)
- # Return the (possibly reduced) function to optimize in order to find MLE
- # estimates for the .fit method
- def _reduce_func(self, args, kwds):
- # First of all, convert fshapes params to fnum: e.g. for stats.beta,
- # shapes='a, b'. To fix `a`, can specify either `f0` or `fa`.
- # Convert the latter into the former.
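- # (e.g. with shapes='a, b', a caller-supplied fix_b=3. becomes f1=3. below)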
- if self.shapes:
- shapes = self.shapes.replace(',', ' ').split()
- for j, s in enumerate(shapes):
- val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
- if val is not None:
- key = 'f%d' % j
- if key in kwds:
- raise ValueError("Duplicate entry for %s." % key)
- else:
- kwds[key] = val
- args = list(args)
- Nargs = len(args)
- fixedn = []
- names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
- x0 = []
- for n, key in enumerate(names):
- if key in kwds:
- fixedn.append(n)
- args[n] = kwds.pop(key)
- else:
- x0.append(args[n])
- if len(fixedn) == 0:
- func = self._penalized_nnlf
- restore = None
- else:
- if len(fixedn) == Nargs:
- raise ValueError(
- "All parameters fixed. There is nothing to optimize.")
- def restore(args, theta):
- # Replace with theta for all numbers not in fixedn
- # This allows the non-fixed values to vary, but
- # we still call self.nnlf with all parameters.
- i = 0
- for n in range(Nargs):
- if n not in fixedn:
- args[n] = theta[i]
- i += 1
- return args
- def func(theta, x):
- newtheta = restore(args[:], theta)
- return self._penalized_nnlf(newtheta, x)
- return x0, func, restore, args
- def fit(self, data, *args, **kwds):
- """
- Return MLEs for shape (if applicable), location, and scale
- parameters from data.
- MLE stands for Maximum Likelihood Estimate. Starting estimates for
- the fit are given by input arguments; for any arguments not provided
- with starting estimates, ``self._fitstart(data)`` is called to generate
- such.
- One can hold some parameters fixed to specific values by passing in
- keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
- and ``floc`` and ``fscale`` (for location and scale parameters,
- respectively).
- Parameters
- ----------
- data : array_like
- Data to use in calculating the MLEs.
- args : floats, optional
- Starting value(s) for any shape-characterizing arguments (those not
- provided will be determined by a call to ``_fitstart(data)``).
- No default value.
- kwds : floats, optional
- Starting values for the location and scale parameters; no default.
- Special keyword arguments are recognized as holding certain
- parameters fixed:
- - f0...fn : hold respective shape parameters fixed.
- Alternatively, shape parameters to fix can be specified by name.
- For example, if ``self.shapes == "a, b"``, ``fa``and ``fix_a``
- are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
- equivalent to ``f1``.
- - floc : hold location parameter fixed to specified value.
- - fscale : hold scale parameter fixed to specified value.
- - optimizer : The optimizer to use. The optimizer must take ``func``,
- and starting position as the first two arguments,
- plus ``args`` (for extra arguments to pass to the
- function to be optimized) and ``disp=0`` to suppress
- output as keyword arguments.
- Returns
- -------
- mle_tuple : tuple of floats
- MLEs for any shape parameters (if applicable), followed by those
- for location and scale. For most random variables, shape estimates
- are returned, but there are exceptions (e.g. ``norm``).
- Notes
- -----
- This fit is computed by maximizing a log-likelihood function, with
- penalty applied for samples outside of range of the distribution. The
- returned answer is not guaranteed to be the globally optimal MLE, it
- may only be locally optimal, or the optimization may fail altogether.
- Examples
- --------
- Generate some data to fit: draw random variates from the `beta`
- distribution
- >>> from scipy.stats import beta
- >>> a, b = 1., 2.
- >>> x = beta.rvs(a, b, size=1000)
- Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
- >>> a1, b1, loc1, scale1 = beta.fit(x)
- We can also use some prior knowledge about the dataset: let's keep
- ``loc`` and ``scale`` fixed:
- >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
- >>> loc1, scale1
- (0, 1)
- We can also keep shape parameters fixed by using ``f``-keywords. To
- keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
- equivalently, ``fa=1``:
- >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
- >>> a1
- 1
- Not all distributions return estimates for the shape parameters.
- ``norm`` for example just returns estimates for location and scale:
- >>> from scipy.stats import norm
- >>> x = norm.rvs(a, b, size=1000, random_state=123)  # here a, b act as loc, scale
- >>> loc1, scale1 = norm.fit(x)
- >>> loc1, scale1
- (0.92087172783841631, 2.0015750750324668)
- """
- Narg = len(args)
- if Narg > self.numargs:
- raise TypeError("Too many input arguments.")
- start = [None]*2
- if (Narg < self.numargs) or not ('loc' in kwds and
- 'scale' in kwds):
- # get distribution specific starting locations
- start = self._fitstart(data)
- args += start[Narg:-2]
- loc = kwds.pop('loc', start[-2])
- scale = kwds.pop('scale', start[-1])
- args += (loc, scale)
- x0, func, restore, args = self._reduce_func(args, kwds)
- optimizer = kwds.pop('optimizer', optimize.fmin)
- # convert string to function in scipy.optimize
- if not callable(optimizer) and isinstance(optimizer, string_types):
- if not optimizer.startswith('fmin_'):
- optimizer = "fmin_"+optimizer
- if optimizer == 'fmin_':
- optimizer = 'fmin'
- try:
- optimizer = getattr(optimize, optimizer)
- except AttributeError:
- raise ValueError("%s is not a valid optimizer" % optimizer)
- # by now kwds must be empty, since everybody took what they needed
- if kwds:
- raise TypeError("Unknown arguments: %s." % kwds)
- vals = optimizer(func, x0, args=(ravel(data),), disp=0)
- if restore is not None:
- vals = restore(args, vals)
- vals = tuple(vals)
- return vals
- def _fit_loc_scale_support(self, data, *args):
- """
- Estimate loc and scale parameters from data accounting for support.
- Parameters
- ----------
- data : array_like
- Data to fit.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- Returns
- -------
- Lhat : float
- Estimated location parameter for the data.
- Shat : float
- Estimated scale parameter for the data.
- """
- data = np.asarray(data)
- # Estimate location and scale according to the method of moments.
- loc_hat, scale_hat = self.fit_loc_scale(data, *args)
- # Compute the support according to the shape parameters.
- self._argcheck(*args)
- a, b = self.a, self.b
- support_width = b - a
- # If the support is empty then return the moment-based estimates.
- if support_width <= 0:
- return loc_hat, scale_hat
- # Compute the proposed support according to the loc and scale
- # estimates.
- a_hat = loc_hat + a * scale_hat
- b_hat = loc_hat + b * scale_hat
- # Use the moment-based estimates if they are compatible with the data.
- data_a = np.min(data)
- data_b = np.max(data)
- if a_hat < data_a and data_b < b_hat:
- return loc_hat, scale_hat
- # Otherwise find other estimates that are compatible with the data.
- data_width = data_b - data_a
- rel_margin = 0.1
- margin = data_width * rel_margin
- # For a finite interval, both the location and scale
- # should have interesting values.
- if support_width < np.inf:
- loc_hat = (data_a - a) - margin
- scale_hat = (data_width + 2 * margin) / support_width
- return loc_hat, scale_hat
- # For a one-sided interval, use only an interesting location parameter.
- if a > -np.inf:
- return (data_a - a) - margin, 1
- elif b < np.inf:
- return (data_b - b) + margin, 1
- else:
- raise RuntimeError
- def fit_loc_scale(self, data, *args):
- """
- Estimate loc and scale parameters from data using 1st and 2nd moments.
- Parameters
- ----------
- data : array_like
- Data to fit.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- Returns
- -------
- Lhat : float
- Estimated location parameter for the data.
- Shat : float
- Estimated scale parameter for the data.
- """
- mu, mu2 = self.stats(*args, **{'moments': 'mv'})
- tmp = asarray(data)
- muhat = tmp.mean()
- mu2hat = tmp.var()
- Shat = sqrt(mu2hat / mu2)
- Lhat = muhat - Shat*mu
- if not np.isfinite(Lhat):
- Lhat = 0
- if not (np.isfinite(Shat) and (0 < Shat)):
- Shat = 1
- return Lhat, Shat
- def _entropy(self, *args):
- def integ(x):
- val = self._pdf(x, *args)
- return entr(val)
- # upper limit is often inf, so suppress warnings when integrating
- olderr = np.seterr(over='ignore')
- h = integrate.quad(integ, self.a, self.b)[0]
- np.seterr(**olderr)
- if not np.isnan(h):
- return h
- else:
- # try with different limits if integration problems
- low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
- if np.isinf(self.b):
- upper = upp
- else:
- upper = self.b
- if np.isinf(self.a):
- lower = low
- else:
- lower = self.a
- return integrate.quad(integ, lower, upper)[0]
- def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
- conditional=False, **kwds):
- """Calculate expected value of a function with respect to the
- distribution by numerical integration.
- The expected value of a function ``f(x)`` with respect to a
- distribution ``dist`` is defined as::
- E[f(x)] = integral(f(x) * dist.pdf(x), lb, ub),
- where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
- distribution. If the bounds ``lb`` and ``ub`` correspond to the
- support of the distribution, e.g. ``[-inf, inf]`` in the default
- case, then the integral is the unrestricted expectation of ``f(x)``.
- Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
- outside a finite interval in which case the expectation is
- calculated within the finite range ``[lb, ub]``.
- Parameters
- ----------
- func : callable, optional
- Function for which integral is calculated. Takes only one argument.
- The default is the identity mapping f(x) = x.
- args : tuple, optional
- Shape parameters of the distribution.
- loc : float, optional
- Location parameter (default=0).
- scale : float, optional
- Scale parameter (default=1).
- lb, ub : scalar, optional
- Lower and upper bound for integration. Default is set to the
- support of the distribution.
- conditional : bool, optional
- If True, the integral is corrected by the conditional probability
- of the integration interval. The return value is the expectation
- of the function, conditional on being in the given interval.
- Default is False.
- Additional keyword arguments are passed to the integration routine.
- Returns
- -------
- expect : float
- The calculated expected value.
- Notes
- -----
- The integration behavior of this function is inherited from
- `scipy.integrate.quad`. Neither this function nor
- `scipy.integrate.quad` can verify whether the integral exists or is
- finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
- ``cauchy(0).expect()`` returns ``0.0``.
- Examples
- --------
- To understand the effect of the bounds of integration consider
- >>> from scipy.stats import expon
- >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
- 0.6321205588285578
- This is close to
- >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
- 0.6321205588285577
- If ``conditional=True``
- >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
- 1.0000000000000002
- The slight deviation from 1 is due to numerical integration.
- """
- lockwds = {'loc': loc,
- 'scale': scale}
- self._argcheck(*args)
- if func is None:
- def fun(x, *args):
- return x * self.pdf(x, *args, **lockwds)
- else:
- def fun(x, *args):
- return func(x) * self.pdf(x, *args, **lockwds)
- if lb is None:
- lb = loc + self.a * scale
- if ub is None:
- ub = loc + self.b * scale
- if conditional:
- invfac = (self.sf(lb, *args, **lockwds)
- - self.sf(ub, *args, **lockwds))
- else:
- invfac = 1.0
- kwds['args'] = args
- # Silence floating point warnings from integration.
- olderr = np.seterr(all='ignore')
- vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
- np.seterr(**olderr)
- return vals
- # Helpers for the discrete distributions
- def _drv2_moment(self, n, *args):
- """Non-central moment of discrete distribution."""
- def fun(x):
- return np.power(x, n) * self._pmf(x, *args)
- return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)
- def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
- b = self.b
- a = self.a
- if isinf(b): # Be sure ending point is > q
- b = int(max(100*q, 10))
- while 1:
- if b >= self.b:
- qb = 1.0
- break
- qb = self._cdf(b, *args)
- if (qb < q):
- b += 10
- else:
- break
- else:
- qb = 1.0
- if isinf(a): # be sure starting point < q
- a = int(min(-100*q, -10))
- while 1:
- if a <= self.a:
- qa = 0.0
- break
- qa = self._cdf(a, *args)
- if (qa > q):
- a -= 10
- else:
- break
- else:
- qa = self._cdf(a, *args)
- while 1:
- if (qa == q):
- return a
- if (qb == q):
- return b
- if b <= a+1:
- if qa > q:
- return a
- else:
- return b
- c = int((a+b)/2.0)
- qc = self._cdf(c, *args)
- if (qc < q):
- if a != c:
- a = c
- else:
- raise RuntimeError('updating stopped, endless loop')
- qa = qc
- elif (qc > q):
- if b != c:
- b = c
- else:
- raise RuntimeError('updating stopped, endless loop')
- qb = qc
- else:
- return c
- def entropy(pk, qk=None, base=None):
- """Calculate the entropy of a distribution for given probability values.
- If only probabilities `pk` are given, the entropy is calculated as
- ``S = -sum(pk * log(pk), axis=0)``.
- If `qk` is not None, then compute the Kullback-Leibler divergence
- ``S = sum(pk * log(pk / qk), axis=0)``.
- This routine will normalize `pk` and `qk` if they don't sum to 1.
- Parameters
- ----------
- pk : sequence
- Defines the (discrete) distribution. ``pk[i]`` is the (possibly
- unnormalized) probability of event ``i``.
- qk : sequence, optional
- Sequence against which the relative entropy is computed. Should be in
- the same format as `pk`.
- base : float, optional
- The logarithmic base to use, defaults to ``e`` (natural logarithm).
- Returns
- -------
- S : float
- The calculated entropy.
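- Examples
- --------
- A small illustration (values rounded for reproducibility):
- >>> from scipy.stats import entropy
- >>> entropy([1./2, 1./2], base=2)   # a fair coin carries one bit
- 1.0
- >>> round(entropy([1./2, 1./2], qk=[9./10, 1./10]), 6)   # KL divergence
- 0.510826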
- """
- pk = asarray(pk)
- pk = 1.0*pk / np.sum(pk, axis=0)
- if qk is None:
- vec = entr(pk)
- else:
- qk = asarray(qk)
- if len(qk) != len(pk):
- raise ValueError("qk and pk must have same length.")
- qk = 1.0*qk / np.sum(qk, axis=0)
- vec = rel_entr(pk, qk)
- S = np.sum(vec, axis=0)
- if base is not None:
- S /= log(base)
- return S
- # Must over-ride one of _pmf or _cdf or pass in
- # x_k, p(x_k) lists in initialization
- class rv_discrete(rv_generic):
- """
- A generic discrete random variable class meant for subclassing.
- `rv_discrete` is a base class to construct specific distribution classes
- and instances for discrete random variables. It can also be used
- to construct an arbitrary distribution defined by a list of support
- points and corresponding probabilities.
- Parameters
- ----------
- a : float, optional
- Lower bound of the support of the distribution, default: 0
- b : float, optional
- Upper bound of the support of the distribution, default: plus infinity
- moment_tol : float, optional
- The tolerance for the generic calculation of moments.
- values : tuple of two array_like, optional
- ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the corresponding
- non-zero probabilities, with ``sum(pk) = 1``.
- inc : integer, optional
- Increment for the support of the distribution.
- Default is 1. (other values have not been tested)
- badvalue : float, optional
- The value in result arrays that indicates a value for which some
- argument restriction is violated; default is np.nan.
- name : str, optional
- The name of the instance. This string is used to construct the default
- example for distributions.
- longname : str, optional
- This string is used as part of the first line of the docstring returned
- when a subclass has no docstring of its own. Note: `longname` exists
- for backwards compatibility, do not use for new subclasses.
- shapes : str, optional
- The shape of the distribution. For example, "m, n" for a distribution
- that takes two integers as its two shape arguments for all its methods.
- If not provided, shape parameters will be inferred from
- the signatures of the private methods, ``_pmf`` and ``_cdf`` of
- the instance.
- extradoc : str, optional
- This string is used as the last part of the docstring returned when a
- subclass has no docstring of its own. Note: `extradoc` exists for
- backwards compatibility, do not use for new subclasses.
- seed : None or int or ``numpy.random.RandomState`` instance, optional
- This parameter defines the RandomState object to use for drawing
- random variates.
- If None, the global np.random state is used.
- If integer, it is used to seed the local RandomState instance.
- Default is None.
- Methods
- -------
- rvs
- pmf
- logpmf
- cdf
- logcdf
- sf
- logsf
- ppf
- isf
- moment
- stats
- entropy
- expect
- median
- mean
- std
- var
- interval
- __call__
- Notes
- -----
- This class is similar to `rv_continuous`. Whether a shape parameter is
- valid is decided by an ``_argcheck`` method (which defaults to checking
- that its arguments are strictly positive).
- The main differences are:
- - the support of the distribution is a set of integers
- - instead of the probability density function, ``pdf`` (and the
- corresponding private ``_pdf``), this class defines the
- *probability mass function*, `pmf` (and the corresponding
- private ``_pmf``.)
- - scale parameter is not defined.
- To create a new discrete distribution, we would do the following:
- >>> import numpy as np
- >>> from scipy.special import factorial
- >>> from scipy.stats import rv_discrete
- >>> class poisson_gen(rv_discrete):
- ... "Poisson distribution"
- ... def _pmf(self, k, mu):
- ... return np.exp(-mu) * mu**k / factorial(k)
- and create an instance::
- >>> poisson = poisson_gen(name="poisson")
- Note that above we defined the Poisson distribution in the standard form.
- Shifting the distribution can be done by providing the ``loc`` parameter
- to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
- delegates the work to ``poisson._pmf(x-loc, mu)``.
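- As a quick check of this delegation:
- >>> poisson.pmf(5, 2.0, loc=3) == poisson.pmf(2, 2.0)
- True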
- **Discrete distributions from a list of probabilities**
- Alternatively, you can construct an arbitrary discrete rv defined
- on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
- ``values`` keyword argument to the `rv_discrete` constructor.
- Examples
- --------
- Custom made discrete distribution:
- >>> import numpy as np
- >>> from scipy import stats
- >>> xk = np.arange(7)
- >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
- >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
- >>>
- >>> import matplotlib.pyplot as plt
- >>> fig, ax = plt.subplots(1, 1)
- >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
- >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
- >>> plt.show()
- Random number generation:
- >>> R = custm.rvs(size=100)
- """
- def __new__(cls, a=0, b=inf, name=None, badvalue=None,
- moment_tol=1e-8, values=None, inc=1, longname=None,
- shapes=None, extradoc=None, seed=None):
- if values is not None:
- # dispatch to a subclass
- return super(rv_discrete, cls).__new__(rv_sample)
- else:
- # business as usual
- return super(rv_discrete, cls).__new__(cls)
- def __init__(self, a=0, b=inf, name=None, badvalue=None,
- moment_tol=1e-8, values=None, inc=1, longname=None,
- shapes=None, extradoc=None, seed=None):
- super(rv_discrete, self).__init__(seed)
- # cf generic freeze
- self._ctor_param = dict(
- a=a, b=b, name=name, badvalue=badvalue,
- moment_tol=moment_tol, values=values, inc=inc,
- longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
- if badvalue is None:
- badvalue = nan
- self.badvalue = badvalue
- self.a = a
- self.b = b
- self.moment_tol = moment_tol
- self.inc = inc
- self._cdfvec = vectorize(self._cdf_single, otypes='d')
- self.vecentropy = vectorize(self._entropy)
- self.shapes = shapes
- if values is not None:
- raise ValueError("rv_discrete.__init__(..., values != None, ...)")
- self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
- locscale_in='loc=0',
- # scale=1 for discrete RVs
- locscale_out='loc, 1')
- # nin correction needs to be after we know numargs
- # correct nin for generic moment vectorization
- _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
- _vec_generic_moment.nin = self.numargs + 2
- self.generic_moment = instancemethod(_vec_generic_moment,
- self, rv_discrete)
- # correct nin for ppf vectorization
- _vppf = vectorize(_drv2_ppfsingle, otypes='d')
- _vppf.nin = self.numargs + 2
- self._ppfvec = instancemethod(_vppf,
- self, rv_discrete)
- # now that self.numargs is defined, we can adjust nin
- self._cdfvec.nin = self.numargs + 1
- self._construct_docstrings(name, longname, extradoc)
- def _construct_docstrings(self, name, longname, extradoc):
- if name is None:
- name = 'Distribution'
- self.name = name
- self.extradoc = extradoc
- # generate docstring for subclass instances
- if longname is None:
- if name[0] in 'aeiouAEIOU':
- hstr = "An "
- else:
- hstr = "A "
- longname = hstr + name
- if sys.flags.optimize < 2:
- # Skip adding docstrings if interpreter is run with -OO
- if self.__doc__ is None:
- self._construct_default_doc(longname=longname,
- extradoc=extradoc,
- docdict=docdict_discrete,
- discrete='discrete')
- else:
- dct = dict(distdiscrete)
- self._construct_doc(docdict_discrete, dct.get(self.name))
- # discrete RV do not have the scale parameter, remove it
- self.__doc__ = self.__doc__.replace(
- '\n scale : array_like, '
- 'optional\n scale parameter (default=1)', '')
- def _updated_ctor_param(self):
- """ Return the current version of _ctor_param, possibly updated by user.
- Used by freezing and pickling.
- Keep this in sync with the signature of __init__.
- """
- dct = self._ctor_param.copy()
- dct['a'] = self.a
- dct['b'] = self.b
- dct['badvalue'] = self.badvalue
- dct['moment_tol'] = self.moment_tol
- dct['inc'] = self.inc
- dct['name'] = self.name
- dct['shapes'] = self.shapes
- dct['extradoc'] = self.extradoc
- return dct
- def _nonzero(self, k, *args):
- return floor(k) == k
- def _pmf(self, k, *args):
- return self._cdf(k, *args) - self._cdf(k-1, *args)
- def _logpmf(self, k, *args):
- return log(self._pmf(k, *args))
- def _cdf_single(self, k, *args):
- m = arange(int(self.a), k+1)
- return np.sum(self._pmf(m, *args), axis=0)
- def _cdf(self, x, *args):
- k = floor(x)
- return self._cdfvec(k, *args)
- # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
- def rvs(self, *args, **kwargs):
- """
- Random variates of given type.
- Parameters
- ----------
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- size : int or tuple of ints, optional
- Defining number of random variates (Default is 1). Note that `size`
- has to be given as keyword, not as positional argument.
- random_state : None or int or ``np.random.RandomState`` instance, optional
- If int or RandomState, use it for drawing the random variates.
- If None, rely on ``self.random_state``.
- Default is None.
- Returns
- -------
- rvs : ndarray or scalar
- Random variates of given `size`.
- """
- kwargs['discrete'] = True
- return super(rv_discrete, self).rvs(*args, **kwargs)
- def pmf(self, k, *args, **kwds):
- """
- Probability mass function at k of the given RV.
- Parameters
- ----------
- k : array_like
- Quantiles.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- pmf : array_like
- Probability mass function evaluated at k
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- k, loc = map(asarray, (k, loc))
- args = tuple(map(asarray, args))
- k = asarray((k-loc))
- cond0 = self._argcheck(*args)
- cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
- cond = cond0 & cond1
- output = zeros(shape(cond), 'd')
- place(output, (1-cond0) + np.isnan(k), self.badvalue)
- if np.any(cond):
- goodargs = argsreduce(cond, *((k,)+args))
- place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
- if output.ndim == 0:
- return output[()]
- return output
- def logpmf(self, k, *args, **kwds):
- """
- Log of the probability mass function at k of the given RV.
- Parameters
- ----------
- k : array_like
- Quantiles.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter. Default is 0.
- Returns
- -------
- logpmf : array_like
- Log of the probability mass function evaluated at k.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- k, loc = map(asarray, (k, loc))
- args = tuple(map(asarray, args))
- k = asarray((k-loc))
- cond0 = self._argcheck(*args)
- cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
- cond = cond0 & cond1
- output = empty(shape(cond), 'd')
- output.fill(NINF)
- place(output, (1-cond0) + np.isnan(k), self.badvalue)
- if np.any(cond):
- goodargs = argsreduce(cond, *((k,)+args))
- place(output, cond, self._logpmf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def cdf(self, k, *args, **kwds):
- """
- Cumulative distribution function of the given RV.
- Parameters
- ----------
- k : array_like, int
- Quantiles.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- cdf : ndarray
- Cumulative distribution function evaluated at `k`.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- k, loc = map(asarray, (k, loc))
- args = tuple(map(asarray, args))
- k = asarray((k-loc))
- cond0 = self._argcheck(*args)
- cond1 = (k >= self.a) & (k < self.b)
- cond2 = (k >= self.b)
- cond = cond0 & cond1
- output = zeros(shape(cond), 'd')
- place(output, (1-cond0) + np.isnan(k), self.badvalue)
- place(output, cond2*(cond0 == cond0), 1.0)
- if np.any(cond):
- goodargs = argsreduce(cond, *((k,)+args))
- place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
- if output.ndim == 0:
- return output[()]
- return output
- def logcdf(self, k, *args, **kwds):
- """
- Log of the cumulative distribution function at k of the given RV.
- Parameters
- ----------
- k : array_like, int
- Quantiles.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- logcdf : array_like
- Log of the cumulative distribution function evaluated at k.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- k, loc = map(asarray, (k, loc))
- args = tuple(map(asarray, args))
- k = asarray((k-loc))
- cond0 = self._argcheck(*args)
- cond1 = (k >= self.a) & (k < self.b)
- cond2 = (k >= self.b)
- cond = cond0 & cond1
- output = empty(shape(cond), 'd')
- output.fill(NINF)
- place(output, (1-cond0) + np.isnan(k), self.badvalue)
- place(output, cond2*(cond0 == cond0), 0.0)
- if np.any(cond):
- goodargs = argsreduce(cond, *((k,)+args))
- place(output, cond, self._logcdf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def sf(self, k, *args, **kwds):
- """
- Survival function (1 - `cdf`) at k of the given RV.
- Parameters
- ----------
- k : array_like
- Quantiles.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- sf : array_like
- Survival function evaluated at k.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- k, loc = map(asarray, (k, loc))
- args = tuple(map(asarray, args))
- k = asarray(k-loc)
- cond0 = self._argcheck(*args)
- cond1 = (k >= self.a) & (k < self.b)
- cond2 = (k < self.a) & cond0
- cond = cond0 & cond1
- output = zeros(shape(cond), 'd')
- place(output, (1-cond0) + np.isnan(k), self.badvalue)
- place(output, cond2, 1.0)
- if np.any(cond):
- goodargs = argsreduce(cond, *((k,)+args))
- place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
- if output.ndim == 0:
- return output[()]
- return output
- def logsf(self, k, *args, **kwds):
- """
- Log of the survival function of the given RV.
- Returns the log of the "survival function," defined as 1 - `cdf`,
- evaluated at `k`.
- Parameters
- ----------
- k : array_like
- Quantiles.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- logsf : ndarray
- Log of the survival function evaluated at `k`.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- k, loc = map(asarray, (k, loc))
- args = tuple(map(asarray, args))
- k = asarray(k-loc)
- cond0 = self._argcheck(*args)
- cond1 = (k >= self.a) & (k < self.b)
- cond2 = (k < self.a) & cond0
- cond = cond0 & cond1
- output = empty(shape(cond), 'd')
- output.fill(NINF)
- place(output, (1-cond0) + np.isnan(k), self.badvalue)
- place(output, cond2, 0.0)
- if np.any(cond):
- goodargs = argsreduce(cond, *((k,)+args))
- place(output, cond, self._logsf(*goodargs))
- if output.ndim == 0:
- return output[()]
- return output
- def ppf(self, q, *args, **kwds):
- """
- Percent point function (inverse of `cdf`) at q of the given RV.
- Parameters
- ----------
- q : array_like
- Lower tail probability.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- k : array_like
- Quantile corresponding to the lower tail probability, q.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- q, loc = map(asarray, (q, loc))
- args = tuple(map(asarray, args))
- cond0 = self._argcheck(*args) & (loc == loc)
- cond1 = (q > 0) & (q < 1)
- cond2 = (q == 1) & cond0
- cond = cond0 & cond1
- output = valarray(shape(cond), value=self.badvalue, typecode='d')
- # output type 'd' to handle nan and inf
- place(output, (q == 0)*(cond == cond), self.a-1)
- place(output, cond2, self.b)
- if np.any(cond):
- goodargs = argsreduce(cond, *((q,)+args+(loc,)))
- loc, goodargs = goodargs[-1], goodargs[:-1]
- place(output, cond, self._ppf(*goodargs) + loc)
- if output.ndim == 0:
- return output[()]
- return output
- def isf(self, q, *args, **kwds):
- """
- Inverse survival function (inverse of `sf`) at q of the given RV.
- Parameters
- ----------
- q : array_like
- Upper tail probability.
- arg1, arg2, arg3,... : array_like
- The shape parameter(s) for the distribution (see docstring of the
- instance object for more information).
- loc : array_like, optional
- Location parameter (default=0).
- Returns
- -------
- k : ndarray or scalar
- Quantile corresponding to the upper tail probability, q.
- """
- args, loc, _ = self._parse_args(*args, **kwds)
- q, loc = map(asarray, (q, loc))
- args = tuple(map(asarray, args))
- cond0 = self._argcheck(*args) & (loc == loc)
- cond1 = (q > 0) & (q < 1)
- cond2 = (q == 1) & cond0
- cond = cond0 & cond1
- # same problem as with ppf; copied from ppf and changed
- output = valarray(shape(cond), value=self.badvalue, typecode='d')
- # output type 'd' to handle nan and inf
- place(output, (q == 0)*(cond == cond), self.b)
- place(output, cond2, self.a-1)
- # call place only if at least 1 valid argument
- if np.any(cond):
- goodargs = argsreduce(cond, *((q,)+args+(loc,)))
- loc, goodargs = goodargs[-1], goodargs[:-1]
- # PB same as ticket 766
- place(output, cond, self._isf(*goodargs) + loc)
- if output.ndim == 0:
- return output[()]
- return output
- def _entropy(self, *args):
- if hasattr(self, 'pk'):
- return entropy(self.pk)
- else:
- return _expect(lambda x: entr(self.pmf(x, *args)),
- self.a, self.b, self.ppf(0.5, *args), self.inc)
- def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
- conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
- """
- Calculate expected value of a function with respect to the distribution
- for discrete distribution by numerical summation.
- Parameters
- ----------
- func : callable, optional
- Function for which the expectation value is calculated.
- Takes only one argument.
- The default is the identity mapping f(k) = k.
- args : tuple, optional
- Shape parameters of the distribution.
- loc : float, optional
- Location parameter.
- Default is 0.
- lb, ub : int, optional
- Lower and upper bound for the summation, default is set to the
- support of the distribution, inclusive (``lb <= k <= ub``).
- conditional : bool, optional
- If true then the expectation is corrected by the conditional
- probability of the summation interval. The return value is the
- expectation of the function, `func`, conditional on being in
- the given interval (k such that ``lb <= k <= ub``).
- Default is False.
- maxcount : int, optional
- Maximal number of terms to evaluate (to avoid an endless loop for
- an infinite sum). Default is 1000.
- tolerance : float, optional
- Absolute tolerance for the summation. Default is 1e-10.
- chunksize : int, optional
- Iterate over the support of a distributions in chunks of this size.
- Default is 32.
- Returns
- -------
- expect : float
- Expected value.
- Notes
- -----
- For heavy-tailed distributions, the expected value may or may not exist,
- depending on the function, `func`. If it does exist, but the sum converges
- slowly, the accuracy of the result may be rather low. For instance, for
- ``zipf(4)``, the accuracy of the mean and variance is only about 1e-5.
- Increasing `maxcount` and/or `chunksize` may improve the result, but may
- also make zipf very slow.
- The function is not vectorized.
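- Examples
- --------
- A quick sanity check (a sketch; the mean of a Poisson(3) variable is 3):
- >>> from scipy.stats import poisson
- >>> round(poisson.expect(lambda k: k, args=(3.0,)), 6)
- 3.0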
- """
- if func is None:
- def fun(x):
- # loc and args from outer scope
- return (x+loc)*self._pmf(x, *args)
- else:
- def fun(x):
- # loc and args from outer scope
- return func(x+loc)*self._pmf(x, *args)
- # Historically, pmf was used here because _pmf does not check the support
- # (an issue for randint); this now seems to work correctly with _pmf,
- # since self.a and self.b are regenerated below via _argcheck.
- self._argcheck(*args) # (re)generate scalar self.a and self.b
- if lb is None:
- lb = self.a
- else:
- lb = lb - loc # convert bound for standardized distribution
- if ub is None:
- ub = self.b
- else:
- ub = ub - loc # convert bound for standardized distribution
- if conditional:
- invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
- else:
- invfac = 1.0
- # iterate over the support, starting from the median
- x0 = self.ppf(0.5, *args)
- res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
- return res / invfac
- def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
- chunksize=32):
- """Helper for computing the expectation value of `fun`."""
- # short-circuit if the support size is small enough
- if (ub - lb) <= chunksize:
- supp = np.arange(lb, ub+1, inc)
- vals = fun(supp)
- return np.sum(vals)
- # otherwise, iterate starting from x0
- if x0 < lb:
- x0 = lb
- if x0 > ub:
- x0 = ub
- count, tot = 0, 0.
- # iterate over [x0, ub] inclusive
- for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
- count += x.size
- delta = np.sum(fun(x))
- tot += delta
- if abs(delta) < tolerance * x.size:
- break
- if count > maxcount:
- warnings.warn('expect(): sum did not converge', RuntimeWarning)
- return tot
- # iterate over [lb, x0)
- for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
- count += x.size
- delta = np.sum(fun(x))
- tot += delta
- if abs(delta) < tolerance * x.size:
- break
- if count > maxcount:
- warnings.warn('expect(): sum did not converge', RuntimeWarning)
- break
- return tot
- def _iter_chunked(x0, x1, chunksize=4, inc=1):
- """Iterate from x0 to x1 in chunks of chunksize and steps inc.
- x0 must be finite, x1 need not be. In the latter case, the iterator is
- infinite.
- Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
- (make sure to set inc < 0.)
- >>> [x for x in _iter_chunked(2, 5, inc=2)]
- [array([2, 4])]
- >>> [x for x in _iter_chunked(2, 11, inc=2)]
- [array([2, 4, 6, 8]), array([10])]
- >>> [x for x in _iter_chunked(2, -5, inc=-2)]
- [array([ 2, 0, -2, -4])]
- >>> [x for x in _iter_chunked(2, -9, inc=-2)]
- [array([ 2, 0, -2, -4]), array([-6, -8])]
- """
- if inc == 0:
- raise ValueError('Cannot increment by zero.')
- if chunksize <= 0:
- raise ValueError('Chunk size must be positive; got %s.' % chunksize)
- s = 1 if inc > 0 else -1
- stepsize = abs(chunksize * inc)
- x = x0
- while (x - x1) * inc < 0:
- delta = min(stepsize, abs(x - x1))
- step = delta * s
- supp = np.arange(x, x + step, inc)
- x += step
- yield supp
- class rv_sample(rv_discrete):
- """A 'sample' discrete distribution defined by the support and values.
- The ctor ignores most of the arguments; it only needs the `values` argument.
- """
- def __init__(self, a=0, b=inf, name=None, badvalue=None,
- moment_tol=1e-8, values=None, inc=1, longname=None,
- shapes=None, extradoc=None, seed=None):
- super(rv_discrete, self).__init__(seed)
- if values is None:
- raise ValueError("rv_sample.__init__(..., values=None,...)")
- # cf generic freeze
- self._ctor_param = dict(
- a=a, b=b, name=name, badvalue=badvalue,
- moment_tol=moment_tol, values=values, inc=inc,
- longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
- if badvalue is None:
- badvalue = nan
- self.badvalue = badvalue
- self.moment_tol = moment_tol
- self.inc = inc
- self.shapes = shapes
- self.vecentropy = self._entropy
- xk, pk = values
- if len(xk) != len(pk):
- raise ValueError("xk and pk need to have the same length.")
- if not np.allclose(np.sum(pk), 1):
- raise ValueError("The sum of provided pk is not 1.")
- indx = np.argsort(np.ravel(xk))
- self.xk = np.take(np.ravel(xk), indx, 0)
- self.pk = np.take(np.ravel(pk), indx, 0)
- self.a = self.xk[0]
- self.b = self.xk[-1]
- self.qvals = np.cumsum(self.pk, axis=0)
- self.shapes = ' ' # bypass inspection
- self._construct_argparser(meths_to_inspect=[self._pmf],
- locscale_in='loc=0',
- # scale=1 for discrete RVs
- locscale_out='loc, 1')
- self._construct_docstrings(name, longname, extradoc)
- def _pmf(self, x):
- return np.select([x == k for k in self.xk],
- [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
- def _cdf(self, x):
- xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
- indx = np.argmax(xxk > xx, axis=-1) - 1
- return self.qvals[indx]
- def _ppf(self, q):
- qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
- indx = argmax(sqq >= qq, axis=-1)
- return self.xk[indx]
- def _rvs(self):
- # Need to define it explicitly, otherwise .rvs() with size=None
- # fails due to explicit broadcasting in _ppf
- U = self._random_state.random_sample(self._size)
- if self._size is None:
- U = np.array(U, ndmin=1)
- Y = self._ppf(U)[0]
- else:
- Y = self._ppf(U)
- return Y
- def _entropy(self):
- return entropy(self.pk)
- def generic_moment(self, n):
- n = asarray(n)
- return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
- def get_distribution_names(namespace_pairs, rv_base_class):
- """
- Collect names of statistical distributions and their generators.
- Parameters
- ----------
- namespace_pairs : sequence
- A snapshot of (name, value) pairs in the namespace of a module.
- rv_base_class : class
- The base class of random variable generator classes in a module.
- Returns
- -------
- distn_names : list of strings
- Names of the statistical distributions.
- distn_gen_names : list of strings
- Names of the generators of the statistical distributions.
- Note that these are not simply the names of the statistical
- distributions, with a _gen suffix added.
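- Examples
- --------
- A sketch of a typical use inside a module that defines distributions
- (names are illustrative)::
- pairs = list(globals().copy().items())
- _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)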
- """
- distn_names = []
- distn_gen_names = []
- for name, value in namespace_pairs:
- if name.startswith('_'):
- continue
- if name.endswith('_gen') and issubclass(value, rv_base_class):
- distn_gen_names.append(name)
- if isinstance(value, rv_base_class):
- distn_names.append(name)
- return distn_names, distn_gen_names