# distance.py
  1. """
  2. =====================================================
  3. Distance computations (:mod:`scipy.spatial.distance`)
  4. =====================================================
  5. .. sectionauthor:: Damian Eads
  6. Function Reference
  7. ------------------
  8. Distance matrix computation from a collection of raw observation vectors
  9. stored in a rectangular array.
  10. .. autosummary::
  11. :toctree: generated/
  12. pdist -- pairwise distances between observation vectors.
  13. cdist -- distances between two collections of observation vectors
  14. squareform -- convert distance matrix to a condensed one and vice versa
  15. directed_hausdorff -- directed Hausdorff distance between arrays
  16. Predicates for checking the validity of distance matrices, both
  17. condensed and redundant. Also contained in this module are functions
  18. for computing the number of observations in a distance matrix.
  19. .. autosummary::
  20. :toctree: generated/
  21. is_valid_dm -- checks for a valid distance matrix
  22. is_valid_y -- checks for a valid condensed distance matrix
  23. num_obs_dm -- # of observations in a distance matrix
  24. num_obs_y -- # of observations in a condensed distance matrix
  25. Distance functions between two numeric vectors ``u`` and ``v``. Computing
  26. distances over a large collection of vectors is inefficient for these
  27. functions. Use ``pdist`` for this purpose.
  28. .. autosummary::
  29. :toctree: generated/
  30. braycurtis -- the Bray-Curtis distance.
  31. canberra -- the Canberra distance.
  32. chebyshev -- the Chebyshev distance.
  33. cityblock -- the Manhattan distance.
  34. correlation -- the Correlation distance.
  35. cosine -- the Cosine distance.
  36. euclidean -- the Euclidean distance.
  37. jensenshannon -- the Jensen-Shannon distance.
  38. mahalanobis -- the Mahalanobis distance.
  39. minkowski -- the Minkowski distance.
  40. seuclidean -- the normalized Euclidean distance.
  41. sqeuclidean -- the squared Euclidean distance.
  42. wminkowski -- (deprecated) alias of `minkowski`.
  43. Distance functions between two boolean vectors (representing sets) ``u`` and
  44. ``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
  45. computing the distances between all pairs.
  46. .. autosummary::
  47. :toctree: generated/
  48. dice -- the Dice dissimilarity.
  49. hamming -- the Hamming distance.
  50. jaccard -- the Jaccard distance.
  51. kulsinski -- the Kulsinski distance.
  52. rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
  53. russellrao -- the Russell-Rao dissimilarity.
  54. sokalmichener -- the Sokal-Michener dissimilarity.
  55. sokalsneath -- the Sokal-Sneath dissimilarity.
  56. yule -- the Yule dissimilarity.
  57. :func:`hamming` also operates over discrete numerical vectors.
  58. """
  59. # Copyright (C) Damian Eads, 2007-2008. New BSD License.
  60. from __future__ import division, print_function, absolute_import
  61. __all__ = [
  62. 'braycurtis',
  63. 'canberra',
  64. 'cdist',
  65. 'chebyshev',
  66. 'cityblock',
  67. 'correlation',
  68. 'cosine',
  69. 'dice',
  70. 'directed_hausdorff',
  71. 'euclidean',
  72. 'hamming',
  73. 'is_valid_dm',
  74. 'is_valid_y',
  75. 'jaccard',
  76. 'jensenshannon',
  77. 'kulsinski',
  78. 'mahalanobis',
  79. 'matching',
  80. 'minkowski',
  81. 'num_obs_dm',
  82. 'num_obs_y',
  83. 'pdist',
  84. 'rogerstanimoto',
  85. 'russellrao',
  86. 'seuclidean',
  87. 'sokalmichener',
  88. 'sokalsneath',
  89. 'sqeuclidean',
  90. 'squareform',
  91. 'wminkowski',
  92. 'yule'
  93. ]
  94. import warnings
  95. import numpy as np
  96. from functools import partial
  97. from collections import namedtuple
  98. from scipy._lib.six import callable, string_types
  99. from scipy._lib.six import xrange
  100. from scipy._lib._util import _asarray_validated
  101. from . import _distance_wrap
  102. from . import _hausdorff
  103. from ..linalg import norm
  104. from ..special import rel_entr
  105. def _args_to_kwargs_xdist(args, kwargs, metric, func_name):
  106. """
  107. Convert legacy positional arguments to keyword arguments for pdist/cdist.
  108. """
  109. if not args:
  110. return kwargs
  111. if (callable(metric) and metric not in [
  112. braycurtis, canberra, chebyshev, cityblock, correlation, cosine,
  113. dice, euclidean, hamming, jaccard, jensenshannon, kulsinski,
  114. mahalanobis, matching, minkowski, rogerstanimoto, russellrao,
  115. seuclidean, sokalmichener, sokalsneath, sqeuclidean, yule,
  116. wminkowski]):
  117. raise TypeError('When using a custom metric arguments must be passed'
  118. 'as keyword (i.e., ARGNAME=ARGVALUE)')
  119. if func_name == 'pdist':
  120. old_arg_names = ['p', 'w', 'V', 'VI']
  121. else:
  122. old_arg_names = ['p', 'V', 'VI', 'w']
  123. num_args = len(args)
  124. warnings.warn('%d metric parameters have been passed as positional.'
  125. 'This will raise an error in a future version.'
  126. 'Please pass arguments as keywords(i.e., ARGNAME=ARGVALUE)'
  127. % num_args, DeprecationWarning)
  128. if num_args > 4:
  129. raise ValueError('Deprecated %s signature accepts only 4'
  130. 'positional arguments (%s), %d given.'
  131. % (func_name, ', '.join(old_arg_names), num_args))
  132. for old_arg, arg in zip(old_arg_names, args):
  133. if old_arg in kwargs:
  134. raise TypeError('%s() got multiple values for argument %s'
  135. % (func_name, old_arg))
  136. kwargs[old_arg] = arg
  137. return kwargs
  138. def _copy_array_if_base_present(a):
  139. """Copy the array if its base points to a parent array."""
  140. if a.base is not None:
  141. return a.copy()
  142. return a
  143. def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
  144. XA = XA - XA.mean(axis=1, keepdims=True)
  145. XB = XB - XB.mean(axis=1, keepdims=True)
  146. _distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs)
  147. def _correlation_pdist_wrap(X, dm, **kwargs):
  148. X2 = X - X.mean(axis=1, keepdims=True)
  149. _distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs)
  150. def _convert_to_type(X, out_type):
  151. return np.ascontiguousarray(X, dtype=out_type)
  152. def _filter_deprecated_kwargs(kwargs, args_blacklist):
  153. # Filtering out old default keywords
  154. for k in args_blacklist:
  155. if k in kwargs:
  156. del kwargs[k]
  157. warnings.warn('Got unexpected kwarg %s. This will raise an error'
  158. ' in a future version.' % k, DeprecationWarning)
  159. def _nbool_correspond_all(u, v, w=None):
  160. if u.dtype == v.dtype == bool and w is None:
  161. not_u = ~u
  162. not_v = ~v
  163. nff = (not_u & not_v).sum()
  164. nft = (not_u & v).sum()
  165. ntf = (u & not_v).sum()
  166. ntt = (u & v).sum()
  167. else:
  168. dtype = np.find_common_type([int], [u.dtype, v.dtype])
  169. u = u.astype(dtype)
  170. v = v.astype(dtype)
  171. not_u = 1.0 - u
  172. not_v = 1.0 - v
  173. if w is not None:
  174. not_u = w * not_u
  175. u = w * u
  176. nff = (not_u * not_v).sum()
  177. nft = (not_u * v).sum()
  178. ntf = (u * not_v).sum()
  179. ntt = (u * v).sum()
  180. return (nff, nft, ntf, ntt)
  181. def _nbool_correspond_ft_tf(u, v, w=None):
  182. if u.dtype == v.dtype == bool and w is None:
  183. not_u = ~u
  184. not_v = ~v
  185. nft = (not_u & v).sum()
  186. ntf = (u & not_v).sum()
  187. else:
  188. dtype = np.find_common_type([int], [u.dtype, v.dtype])
  189. u = u.astype(dtype)
  190. v = v.astype(dtype)
  191. not_u = 1.0 - u
  192. not_v = 1.0 - v
  193. if w is not None:
  194. not_u = w * not_u
  195. u = w * u
  196. nft = (not_u * v).sum()
  197. ntf = (u * not_v).sum()
  198. return (nft, ntf)
  199. def _validate_cdist_input(XA, XB, mA, mB, n, metric_name, **kwargs):
  200. if metric_name is not None:
  201. # get supported types
  202. types = _METRICS[metric_name].types
  203. # choose best type
  204. typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0]
  205. # validate data
  206. XA = _convert_to_type(XA, out_type=typ)
  207. XB = _convert_to_type(XB, out_type=typ)
  208. # validate kwargs
  209. _validate_kwargs = _METRICS[metric_name].validator
  210. if _validate_kwargs:
  211. kwargs = _validate_kwargs(np.vstack([XA, XB]), mA + mB, n, **kwargs)
  212. else:
  213. typ = None
  214. return XA, XB, typ, kwargs
  215. def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
  216. VI = kwargs.pop('VI', None)
  217. if VI is None:
  218. if m <= n:
  219. # There are fewer observations than the dimension of
  220. # the observations.
  221. raise ValueError("The number of observations (%d) is too "
  222. "small; the covariance matrix is "
  223. "singular. For observations with %d "
  224. "dimensions, at least %d observations "
  225. "are required." % (m, n, n + 1))
  226. CV = np.atleast_2d(np.cov(X.astype(np.double).T))
  227. VI = np.linalg.inv(CV).T.copy()
  228. kwargs["VI"] = _convert_to_double(VI)
  229. return kwargs
  230. def _validate_minkowski_kwargs(X, m, n, **kwargs):
  231. if 'p' not in kwargs:
  232. kwargs['p'] = 2.
  233. return kwargs
  234. def _validate_pdist_input(X, m, n, metric_name, **kwargs):
  235. if metric_name is not None:
  236. # get supported types
  237. types = _METRICS[metric_name].types
  238. # choose best type
  239. typ = types[types.index(X.dtype)] if X.dtype in types else types[0]
  240. # validate data
  241. X = _convert_to_type(X, out_type=typ)
  242. # validate kwargs
  243. _validate_kwargs = _METRICS[metric_name].validator
  244. if _validate_kwargs:
  245. kwargs = _validate_kwargs(X, m, n, **kwargs)
  246. else:
  247. typ = None
  248. return X, typ, kwargs
  249. def _validate_seuclidean_kwargs(X, m, n, **kwargs):
  250. V = kwargs.pop('V', None)
  251. if V is None:
  252. V = np.var(X.astype(np.double), axis=0, ddof=1)
  253. else:
  254. V = np.asarray(V, order='c')
  255. if V.dtype != np.double:
  256. raise TypeError('Variance vector V must contain doubles.')
  257. if len(V.shape) != 1:
  258. raise ValueError('Variance vector V must '
  259. 'be one-dimensional.')
  260. if V.shape[0] != n:
  261. raise ValueError('Variance vector V must be of the same '
  262. 'dimension as the vectors on which the distances '
  263. 'are computed.')
  264. kwargs['V'] = _convert_to_double(V)
  265. return kwargs
  266. def _validate_vector(u, dtype=None):
  267. # XXX Is order='c' really necessary?
  268. u = np.asarray(u, dtype=dtype, order='c').squeeze()
  269. # Ensure values such as u=1 and u=[1] still return 1-D arrays.
  270. u = np.atleast_1d(u)
  271. if u.ndim > 1:
  272. raise ValueError("Input vector should be 1-D.")
  273. return u
  274. def _validate_weights(w, dtype=np.double):
  275. w = _validate_vector(w, dtype=dtype)
  276. if np.any(w < 0):
  277. raise ValueError("Input weights should be all non-negative")
  278. return w
  279. def _validate_wminkowski_kwargs(X, m, n, **kwargs):
  280. w = kwargs.pop('w', None)
  281. if w is None:
  282. raise ValueError('weighted minkowski requires a weight '
  283. 'vector `w` to be given.')
  284. kwargs['w'] = _validate_weights(w)
  285. if 'p' not in kwargs:
  286. kwargs['p'] = 2.
  287. return kwargs
  288. def directed_hausdorff(u, v, seed=0):
  289. """
  290. Compute the directed Hausdorff distance between two N-D arrays.
  291. Distances between pairs are calculated using a Euclidean metric.
  292. Parameters
  293. ----------
  294. u : (M,N) ndarray
  295. Input array.
  296. v : (O,N) ndarray
  297. Input array.
  298. seed : int or None
  299. Local `np.random.RandomState` seed. Default is 0, a random shuffling of
  300. u and v that guarantees reproducibility.
  301. Returns
  302. -------
  303. d : double
  304. The directed Hausdorff distance between arrays `u` and `v`,
  305. index_1 : int
  306. index of point contributing to Hausdorff pair in `u`
  307. index_2 : int
  308. index of point contributing to Hausdorff pair in `v`
  309. Notes
  310. -----
  311. Uses the early break technique and the random sampling approach
  312. described by [1]_. Although worst-case performance is ``O(m * o)``
  313. (as with the brute force algorithm), this is unlikely in practice
  314. as the input data would have to require the algorithm to explore
  315. every single point interaction, and after the algorithm shuffles
  316. the input points at that. The best case performance is O(m), which
  317. is satisfied by selecting an inner loop distance that is less than
  318. cmax and leads to an early break as often as possible. The authors
  319. have formally shown that the average runtime is closer to O(m).
  320. .. versionadded:: 0.19.0
  321. References
  322. ----------
  323. .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
  324. calculating the exact Hausdorff distance." IEEE Transactions On
  325. Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
  326. 2015.
  327. See Also
  328. --------
  329. scipy.spatial.procrustes : Another similarity test for two data sets
  330. Examples
  331. --------
  332. Find the directed Hausdorff distance between two 2-D arrays of
  333. coordinates:
  334. >>> from scipy.spatial.distance import directed_hausdorff
  335. >>> u = np.array([(1.0, 0.0),
  336. ... (0.0, 1.0),
  337. ... (-1.0, 0.0),
  338. ... (0.0, -1.0)])
  339. >>> v = np.array([(2.0, 0.0),
  340. ... (0.0, 2.0),
  341. ... (-2.0, 0.0),
  342. ... (0.0, -4.0)])
  343. >>> directed_hausdorff(u, v)[0]
  344. 2.23606797749979
  345. >>> directed_hausdorff(v, u)[0]
  346. 3.0
  347. Find the general (symmetric) Hausdorff distance between two 2-D
  348. arrays of coordinates:
  349. >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
  350. 3.0
  351. Find the indices of the points that generate the Hausdorff distance
  352. (the Hausdorff pair):
  353. >>> directed_hausdorff(v, u)[1:]
  354. (3, 3)
  355. """
  356. u = np.asarray(u, dtype=np.float64, order='c')
  357. v = np.asarray(v, dtype=np.float64, order='c')
  358. result = _hausdorff.directed_hausdorff(u, v, seed)
  359. return result
  360. def minkowski(u, v, p=2, w=None):
  361. """
  362. Compute the Minkowski distance between two 1-D arrays.
  363. The Minkowski distance between 1-D arrays `u` and `v`,
  364. is defined as
  365. .. math::
  366. {||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}.
  367. \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}.
  368. Parameters
  369. ----------
  370. u : (N,) array_like
  371. Input array.
  372. v : (N,) array_like
  373. Input array.
  374. p : int
  375. The order of the norm of the difference :math:`{||u-v||}_p`.
  376. w : (N,) array_like, optional
  377. The weights for each value in `u` and `v`. Default is None,
  378. which gives each value a weight of 1.0
  379. Returns
  380. -------
  381. minkowski : double
  382. The Minkowski distance between vectors `u` and `v`.
  383. Examples
  384. --------
  385. >>> from scipy.spatial import distance
  386. >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
  387. 2.0
  388. >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
  389. 1.4142135623730951
  390. >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
  391. 1.2599210498948732
  392. >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1)
  393. 1.0
  394. >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2)
  395. 1.0
  396. >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)
  397. 1.0
  398. """
  399. u = _validate_vector(u)
  400. v = _validate_vector(v)
  401. if p < 1:
  402. raise ValueError("p must be at least 1")
  403. u_v = u - v
  404. if w is not None:
  405. w = _validate_weights(w)
  406. if p == 1:
  407. root_w = w
  408. if p == 2:
  409. # better precision and speed
  410. root_w = np.sqrt(w)
  411. else:
  412. root_w = np.power(w, 1/p)
  413. u_v = root_w * u_v
  414. dist = norm(u_v, ord=p)
  415. return dist
# `minkowski` gained weights in scipy 1.0. Once we're at say version 1.3,
# deprecate `wminkowski`. Not done at once because it would be annoying for
# downstream libraries that used `wminkowski` and support multiple scipy
# versions.
  420. def wminkowski(u, v, p, w):
  421. """
  422. Compute the weighted Minkowski distance between two 1-D arrays.
  423. The weighted Minkowski distance between `u` and `v`, defined as
  424. .. math::
  425. \\left(\\sum{(|w_i (u_i - v_i)|^p)}\\right)^{1/p}.
  426. Parameters
  427. ----------
  428. u : (N,) array_like
  429. Input array.
  430. v : (N,) array_like
  431. Input array.
  432. p : int
  433. The order of the norm of the difference :math:`{||u-v||}_p`.
  434. w : (N,) array_like
  435. The weight vector.
  436. Returns
  437. -------
  438. wminkowski : double
  439. The weighted Minkowski distance between vectors `u` and `v`.
  440. Notes
  441. -----
  442. `wminkowski` is DEPRECATED. It implements a definition where weights
  443. are powered. It is recommended to use the weighted version of `minkowski`
  444. instead. This function will be removed in a future version of scipy.
  445. Examples
  446. --------
  447. >>> from scipy.spatial import distance
  448. >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 1, np.ones(3))
  449. 2.0
  450. >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, np.ones(3))
  451. 1.4142135623730951
  452. >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 3, np.ones(3))
  453. 1.2599210498948732
  454. >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 1, np.ones(3))
  455. 1.0
  456. >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 2, np.ones(3))
  457. 1.0
  458. >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 3, np.ones(3))
  459. 1.0
  460. """
  461. w = _validate_weights(w)
  462. return minkowski(u, v, p=p, w=w**p)
  463. def euclidean(u, v, w=None):
  464. """
  465. Computes the Euclidean distance between two 1-D arrays.
  466. The Euclidean distance between 1-D arrays `u` and `v`, is defined as
  467. .. math::
  468. {||u-v||}_2
  469. \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}
  470. Parameters
  471. ----------
  472. u : (N,) array_like
  473. Input array.
  474. v : (N,) array_like
  475. Input array.
  476. w : (N,) array_like, optional
  477. The weights for each value in `u` and `v`. Default is None,
  478. which gives each value a weight of 1.0
  479. Returns
  480. -------
  481. euclidean : double
  482. The Euclidean distance between vectors `u` and `v`.
  483. Examples
  484. --------
  485. >>> from scipy.spatial import distance
  486. >>> distance.euclidean([1, 0, 0], [0, 1, 0])
  487. 1.4142135623730951
  488. >>> distance.euclidean([1, 1, 0], [0, 1, 0])
  489. 1.0
  490. """
  491. return minkowski(u, v, p=2, w=w)
  492. def sqeuclidean(u, v, w=None):
  493. """
  494. Compute the squared Euclidean distance between two 1-D arrays.
  495. The squared Euclidean distance between `u` and `v` is defined as
  496. .. math::
  497. {||u-v||}_2^2
  498. \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)
  499. Parameters
  500. ----------
  501. u : (N,) array_like
  502. Input array.
  503. v : (N,) array_like
  504. Input array.
  505. w : (N,) array_like, optional
  506. The weights for each value in `u` and `v`. Default is None,
  507. which gives each value a weight of 1.0
  508. Returns
  509. -------
  510. sqeuclidean : double
  511. The squared Euclidean distance between vectors `u` and `v`.
  512. Examples
  513. --------
  514. >>> from scipy.spatial import distance
  515. >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
  516. 2.0
  517. >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
  518. 1.0
  519. """
  520. # Preserve float dtypes, but convert everything else to np.float64
  521. # for stability.
  522. utype, vtype = None, None
  523. if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
  524. utype = np.float64
  525. if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
  526. vtype = np.float64
  527. u = _validate_vector(u, dtype=utype)
  528. v = _validate_vector(v, dtype=vtype)
  529. u_v = u - v
  530. u_v_w = u_v # only want weights applied once
  531. if w is not None:
  532. w = _validate_weights(w)
  533. u_v_w = w * u_v
  534. return np.dot(u_v, u_v_w)
  535. def correlation(u, v, w=None, centered=True):
  536. """
  537. Compute the correlation distance between two 1-D arrays.
  538. The correlation distance between `u` and `v`, is
  539. defined as
  540. .. math::
  541. 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
  542. {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
  543. where :math:`\\bar{u}` is the mean of the elements of `u`
  544. and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
  545. Parameters
  546. ----------
  547. u : (N,) array_like
  548. Input array.
  549. v : (N,) array_like
  550. Input array.
  551. w : (N,) array_like, optional
  552. The weights for each value in `u` and `v`. Default is None,
  553. which gives each value a weight of 1.0
  554. Returns
  555. -------
  556. correlation : double
  557. The correlation distance between 1-D array `u` and `v`.
  558. """
  559. u = _validate_vector(u)
  560. v = _validate_vector(v)
  561. if w is not None:
  562. w = _validate_weights(w)
  563. if centered:
  564. umu = np.average(u, weights=w)
  565. vmu = np.average(v, weights=w)
  566. u = u - umu
  567. v = v - vmu
  568. uv = np.average(u * v, weights=w)
  569. uu = np.average(np.square(u), weights=w)
  570. vv = np.average(np.square(v), weights=w)
  571. dist = 1.0 - uv / np.sqrt(uu * vv)
  572. return dist
def cosine(u, v, w=None):
    """
    Compute the Cosine distance between 1-D arrays.

    The Cosine distance between `u` and `v`, is defined as

    .. math::

        1 - \\frac{u \\cdot v}
                  {||u||_2 ||v||_2}.

    where :math:`u \\cdot v` is the dot product of :math:`u` and
    :math:`v`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cosine : double
        The Cosine distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([100, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 1, 0], [0, 1, 0])
    0.29289321881345254
    """
    # cosine distance is also referred to as 'uncentered correlation',
    # or 'reflective correlation' -- it is exactly the correlation
    # distance computed without mean-centering the inputs.
    return correlation(u, v, w=w, centered=False)
  608. def hamming(u, v, w=None):
  609. """
  610. Compute the Hamming distance between two 1-D arrays.
  611. The Hamming distance between 1-D arrays `u` and `v`, is simply the
  612. proportion of disagreeing components in `u` and `v`. If `u` and `v` are
  613. boolean vectors, the Hamming distance is
  614. .. math::
  615. \\frac{c_{01} + c_{10}}{n}
  616. where :math:`c_{ij}` is the number of occurrences of
  617. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  618. :math:`k < n`.
  619. Parameters
  620. ----------
  621. u : (N,) array_like
  622. Input array.
  623. v : (N,) array_like
  624. Input array.
  625. w : (N,) array_like, optional
  626. The weights for each value in `u` and `v`. Default is None,
  627. which gives each value a weight of 1.0
  628. Returns
  629. -------
  630. hamming : double
  631. The Hamming distance between vectors `u` and `v`.
  632. Examples
  633. --------
  634. >>> from scipy.spatial import distance
  635. >>> distance.hamming([1, 0, 0], [0, 1, 0])
  636. 0.66666666666666663
  637. >>> distance.hamming([1, 0, 0], [1, 1, 0])
  638. 0.33333333333333331
  639. >>> distance.hamming([1, 0, 0], [2, 0, 0])
  640. 0.33333333333333331
  641. >>> distance.hamming([1, 0, 0], [3, 0, 0])
  642. 0.33333333333333331
  643. """
  644. u = _validate_vector(u)
  645. v = _validate_vector(v)
  646. if u.shape != v.shape:
  647. raise ValueError('The 1d arrays must have equal lengths.')
  648. u_ne_v = u != v
  649. if w is not None:
  650. w = _validate_weights(w)
  651. return np.average(u_ne_v, weights=w)
  652. def jaccard(u, v, w=None):
  653. """
  654. Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.
  655. The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
  656. is defined as
  657. .. math::
  658. \\frac{c_{TF} + c_{FT}}
  659. {c_{TT} + c_{FT} + c_{TF}}
  660. where :math:`c_{ij}` is the number of occurrences of
  661. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  662. :math:`k < n`.
  663. Parameters
  664. ----------
  665. u : (N,) array_like, bool
  666. Input array.
  667. v : (N,) array_like, bool
  668. Input array.
  669. w : (N,) array_like, optional
  670. The weights for each value in `u` and `v`. Default is None,
  671. which gives each value a weight of 1.0
  672. Returns
  673. -------
  674. jaccard : double
  675. The Jaccard distance between vectors `u` and `v`.
  676. Notes
  677. -----
  678. When both `u` and `v` lead to a `0/0` division i.e. there is no overlap
  679. between the items in the vectors the returned distance is 0. See the
  680. Wikipedia page on the Jaccard index [1]_, and this paper [2]_.
  681. .. versionchanged:: 1.2.0
  682. Previously, when `u` and `v` lead to a `0/0` division, the function
  683. would return NaN. This was changed to return 0 instead.
  684. References
  685. ----------
  686. .. [1] https://en.wikipedia.org/wiki/Jaccard_index
  687. .. [2] S. Kosub, "A note on the triangle inequality for the Jaccard
  688. distance", 2016, Available online: https://arxiv.org/pdf/1612.02696.pdf
  689. Examples
  690. --------
  691. >>> from scipy.spatial import distance
  692. >>> distance.jaccard([1, 0, 0], [0, 1, 0])
  693. 1.0
  694. >>> distance.jaccard([1, 0, 0], [1, 1, 0])
  695. 0.5
  696. >>> distance.jaccard([1, 0, 0], [1, 2, 0])
  697. 0.5
  698. >>> distance.jaccard([1, 0, 0], [1, 1, 1])
  699. 0.66666666666666663
  700. """
  701. u = _validate_vector(u)
  702. v = _validate_vector(v)
  703. nonzero = np.bitwise_or(u != 0, v != 0)
  704. unequal_nonzero = np.bitwise_and((u != v), nonzero)
  705. if w is not None:
  706. w = _validate_weights(w)
  707. nonzero = w * nonzero
  708. unequal_nonzero = w * unequal_nonzero
  709. a = np.double(unequal_nonzero.sum())
  710. b = np.double(nonzero.sum())
  711. return (a / b) if b != 0 else 0
  712. def kulsinski(u, v, w=None):
  713. """
  714. Compute the Kulsinski dissimilarity between two boolean 1-D arrays.
  715. The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
  716. is defined as
  717. .. math::
  718. \\frac{c_{TF} + c_{FT} - c_{TT} + n}
  719. {c_{FT} + c_{TF} + n}
  720. where :math:`c_{ij}` is the number of occurrences of
  721. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  722. :math:`k < n`.
  723. Parameters
  724. ----------
  725. u : (N,) array_like, bool
  726. Input array.
  727. v : (N,) array_like, bool
  728. Input array.
  729. w : (N,) array_like, optional
  730. The weights for each value in `u` and `v`. Default is None,
  731. which gives each value a weight of 1.0
  732. Returns
  733. -------
  734. kulsinski : double
  735. The Kulsinski distance between vectors `u` and `v`.
  736. Examples
  737. --------
  738. >>> from scipy.spatial import distance
  739. >>> distance.kulsinski([1, 0, 0], [0, 1, 0])
  740. 1.0
  741. >>> distance.kulsinski([1, 0, 0], [1, 1, 0])
  742. 0.75
  743. >>> distance.kulsinski([1, 0, 0], [2, 1, 0])
  744. 0.33333333333333331
  745. >>> distance.kulsinski([1, 0, 0], [3, 1, 0])
  746. -0.5
  747. """
  748. u = _validate_vector(u)
  749. v = _validate_vector(v)
  750. if w is None:
  751. n = float(len(u))
  752. else:
  753. w = _validate_weights(w)
  754. n = w.sum()
  755. (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
  756. return (ntf + nft - ntt + n) / (ntf + nft + n)
  757. def seuclidean(u, v, V):
  758. """
  759. Return the standardized Euclidean distance between two 1-D arrays.
  760. The standardized Euclidean distance between `u` and `v`.
  761. Parameters
  762. ----------
  763. u : (N,) array_like
  764. Input array.
  765. v : (N,) array_like
  766. Input array.
  767. V : (N,) array_like
  768. `V` is an 1-D array of component variances. It is usually computed
  769. among a larger collection vectors.
  770. Returns
  771. -------
  772. seuclidean : double
  773. The standardized Euclidean distance between vectors `u` and `v`.
  774. Examples
  775. --------
  776. >>> from scipy.spatial import distance
  777. >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
  778. 4.4721359549995796
  779. >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
  780. 3.3166247903553998
  781. >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1])
  782. 3.1780497164141406
  783. """
  784. u = _validate_vector(u)
  785. v = _validate_vector(v)
  786. V = _validate_vector(V, dtype=np.float64)
  787. if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
  788. raise TypeError('V must be a 1-D array of the same dimension '
  789. 'as u and v.')
  790. return euclidean(u, v, w=1/V)
  791. def cityblock(u, v, w=None):
  792. """
  793. Compute the City Block (Manhattan) distance.
  794. Computes the Manhattan distance between two 1-D arrays `u` and `v`,
  795. which is defined as
  796. .. math::
  797. \\sum_i {\\left| u_i - v_i \\right|}.
  798. Parameters
  799. ----------
  800. u : (N,) array_like
  801. Input array.
  802. v : (N,) array_like
  803. Input array.
  804. w : (N,) array_like, optional
  805. The weights for each value in `u` and `v`. Default is None,
  806. which gives each value a weight of 1.0
  807. Returns
  808. -------
  809. cityblock : double
  810. The City Block (Manhattan) distance between vectors `u` and `v`.
  811. Examples
  812. --------
  813. >>> from scipy.spatial import distance
  814. >>> distance.cityblock([1, 0, 0], [0, 1, 0])
  815. 2
  816. >>> distance.cityblock([1, 0, 0], [0, 2, 0])
  817. 3
  818. >>> distance.cityblock([1, 0, 0], [1, 1, 0])
  819. 1
  820. """
  821. u = _validate_vector(u)
  822. v = _validate_vector(v)
  823. l1_diff = abs(u - v)
  824. if w is not None:
  825. w = _validate_weights(w)
  826. l1_diff = w * l1_diff
  827. return l1_diff.sum()
  828. def mahalanobis(u, v, VI):
  829. """
  830. Compute the Mahalanobis distance between two 1-D arrays.
  831. The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as
  832. .. math::
  833. \\sqrt{ (u-v) V^{-1} (u-v)^T }
  834. where ``V`` is the covariance matrix. Note that the argument `VI`
  835. is the inverse of ``V``.
  836. Parameters
  837. ----------
  838. u : (N,) array_like
  839. Input array.
  840. v : (N,) array_like
  841. Input array.
  842. VI : ndarray
  843. The inverse of the covariance matrix.
  844. Returns
  845. -------
  846. mahalanobis : double
  847. The Mahalanobis distance between vectors `u` and `v`.
  848. Examples
  849. --------
  850. >>> from scipy.spatial import distance
  851. >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
  852. >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
  853. 1.0
  854. >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv)
  855. 1.0
  856. >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
  857. 1.7320508075688772
  858. """
  859. u = _validate_vector(u)
  860. v = _validate_vector(v)
  861. VI = np.atleast_2d(VI)
  862. delta = u - v
  863. m = np.dot(np.dot(delta, VI), delta)
  864. return np.sqrt(m)
  865. def chebyshev(u, v, w=None):
  866. """
  867. Compute the Chebyshev distance.
  868. Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
  869. which is defined as
  870. .. math::
  871. \\max_i {|u_i-v_i|}.
  872. Parameters
  873. ----------
  874. u : (N,) array_like
  875. Input vector.
  876. v : (N,) array_like
  877. Input vector.
  878. w : (N,) array_like, optional
  879. The weights for each value in `u` and `v`. Default is None,
  880. which gives each value a weight of 1.0
  881. Returns
  882. -------
  883. chebyshev : double
  884. The Chebyshev distance between vectors `u` and `v`.
  885. Examples
  886. --------
  887. >>> from scipy.spatial import distance
  888. >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
  889. 1
  890. >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
  891. 1
  892. """
  893. u = _validate_vector(u)
  894. v = _validate_vector(v)
  895. if w is not None:
  896. w = _validate_weights(w)
  897. has_weight = w > 0
  898. if has_weight.sum() < w.size:
  899. u = u[has_weight]
  900. v = v[has_weight]
  901. return max(abs(u - v))
  902. def braycurtis(u, v, w=None):
  903. """
  904. Compute the Bray-Curtis distance between two 1-D arrays.
  905. Bray-Curtis distance is defined as
  906. .. math::
  907. \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}
  908. The Bray-Curtis distance is in the range [0, 1] if all coordinates are
  909. positive, and is undefined if the inputs are of length zero.
  910. Parameters
  911. ----------
  912. u : (N,) array_like
  913. Input array.
  914. v : (N,) array_like
  915. Input array.
  916. w : (N,) array_like, optional
  917. The weights for each value in `u` and `v`. Default is None,
  918. which gives each value a weight of 1.0
  919. Returns
  920. -------
  921. braycurtis : double
  922. The Bray-Curtis distance between 1-D arrays `u` and `v`.
  923. Examples
  924. --------
  925. >>> from scipy.spatial import distance
  926. >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
  927. 1.0
  928. >>> distance.braycurtis([1, 1, 0], [0, 1, 0])
  929. 0.33333333333333331
  930. """
  931. u = _validate_vector(u)
  932. v = _validate_vector(v, dtype=np.float64)
  933. l1_diff = abs(u - v)
  934. l1_sum = abs(u + v)
  935. if w is not None:
  936. w = _validate_weights(w)
  937. l1_diff = w * l1_diff
  938. l1_sum = w * l1_sum
  939. return l1_diff.sum() / l1_sum.sum()
  940. def canberra(u, v, w=None):
  941. """
  942. Compute the Canberra distance between two 1-D arrays.
  943. The Canberra distance is defined as
  944. .. math::
  945. d(u,v) = \\sum_i \\frac{|u_i-v_i|}
  946. {|u_i|+|v_i|}.
  947. Parameters
  948. ----------
  949. u : (N,) array_like
  950. Input array.
  951. v : (N,) array_like
  952. Input array.
  953. w : (N,) array_like, optional
  954. The weights for each value in `u` and `v`. Default is None,
  955. which gives each value a weight of 1.0
  956. Returns
  957. -------
  958. canberra : double
  959. The Canberra distance between vectors `u` and `v`.
  960. Notes
  961. -----
  962. When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is
  963. used in the calculation.
  964. Examples
  965. --------
  966. >>> from scipy.spatial import distance
  967. >>> distance.canberra([1, 0, 0], [0, 1, 0])
  968. 2.0
  969. >>> distance.canberra([1, 1, 0], [0, 1, 0])
  970. 1.0
  971. """
  972. u = _validate_vector(u)
  973. v = _validate_vector(v, dtype=np.float64)
  974. if w is not None:
  975. w = _validate_weights(w)
  976. olderr = np.seterr(invalid='ignore')
  977. try:
  978. abs_uv = abs(u - v)
  979. abs_u = abs(u)
  980. abs_v = abs(v)
  981. d = abs_uv / (abs_u + abs_v)
  982. if w is not None:
  983. d = w * d
  984. d = np.nansum(d)
  985. finally:
  986. np.seterr(**olderr)
  987. return d
  988. def jensenshannon(p, q, base=None):
  989. """
  990. Compute the Jensen-Shannon distance (metric) between
  991. two 1-D probability arrays. This is the square root
  992. of the Jensen-Shannon divergence.
  993. The Jensen-Shannon distance between two probability
  994. vectors `p` and `q` is defined as,
  995. .. math::
  996. \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
  997. where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
  998. and :math:`D` is the Kullback-Leibler divergence.
  999. This routine will normalize `p` and `q` if they don't sum to 1.0.
  1000. Parameters
  1001. ----------
  1002. p : (N,) array_like
  1003. left probability vector
  1004. q : (N,) array_like
  1005. right probability vector
  1006. base : double, optional
  1007. the base of the logarithm used to compute the output
  1008. if not given, then the routine uses the default base of
  1009. scipy.stats.entropy.
  1010. Returns
  1011. -------
  1012. js : double
  1013. The Jensen-Shannon distance between `p` and `q`
  1014. .. versionadded:: 1.2.0
  1015. Examples
  1016. --------
  1017. >>> from scipy.spatial import distance
  1018. >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)
  1019. 1.0
  1020. >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])
  1021. 0.46450140402245893
  1022. >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
  1023. 0.0
  1024. """
  1025. p = np.asarray(p)
  1026. q = np.asarray(q)
  1027. p = p / np.sum(p, axis=0)
  1028. q = q / np.sum(q, axis=0)
  1029. m = (p + q) / 2.0
  1030. left = rel_entr(p, m)
  1031. right = rel_entr(q, m)
  1032. js = np.sum(left, axis=0) + np.sum(right, axis=0)
  1033. if base is not None:
  1034. js /= np.log(base)
  1035. return np.sqrt(js / 2.0)
  1036. def yule(u, v, w=None):
  1037. """
  1038. Compute the Yule dissimilarity between two boolean 1-D arrays.
  1039. The Yule dissimilarity is defined as
  1040. .. math::
  1041. \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}
  1042. where :math:`c_{ij}` is the number of occurrences of
  1043. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  1044. :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.
  1045. Parameters
  1046. ----------
  1047. u : (N,) array_like, bool
  1048. Input array.
  1049. v : (N,) array_like, bool
  1050. Input array.
  1051. w : (N,) array_like, optional
  1052. The weights for each value in `u` and `v`. Default is None,
  1053. which gives each value a weight of 1.0
  1054. Returns
  1055. -------
  1056. yule : double
  1057. The Yule dissimilarity between vectors `u` and `v`.
  1058. Examples
  1059. --------
  1060. >>> from scipy.spatial import distance
  1061. >>> distance.yule([1, 0, 0], [0, 1, 0])
  1062. 2.0
  1063. >>> distance.yule([1, 1, 0], [0, 1, 0])
  1064. 0.0
  1065. """
  1066. u = _validate_vector(u)
  1067. v = _validate_vector(v)
  1068. if w is not None:
  1069. w = _validate_weights(w)
  1070. (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
  1071. return float(2.0 * ntf * nft / np.array(ntt * nff + ntf * nft))
@np.deprecate(message="spatial.distance.matching is deprecated in scipy 1.0.0; "
                      "use spatial.distance.hamming instead.")
def matching(u, v, w=None):
    """
    Compute the Hamming distance between two boolean 1-D arrays.

    This is a deprecated synonym for :func:`hamming`; it simply forwards
    all arguments unchanged and emits a DeprecationWarning via the
    ``np.deprecate`` decorator above.
    """
    return hamming(u, v, w=w)
  1080. def dice(u, v, w=None):
  1081. """
  1082. Compute the Dice dissimilarity between two boolean 1-D arrays.
  1083. The Dice dissimilarity between `u` and `v`, is
  1084. .. math::
  1085. \\frac{c_{TF} + c_{FT}}
  1086. {2c_{TT} + c_{FT} + c_{TF}}
  1087. where :math:`c_{ij}` is the number of occurrences of
  1088. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  1089. :math:`k < n`.
  1090. Parameters
  1091. ----------
  1092. u : (N,) ndarray, bool
  1093. Input 1-D array.
  1094. v : (N,) ndarray, bool
  1095. Input 1-D array.
  1096. w : (N,) array_like, optional
  1097. The weights for each value in `u` and `v`. Default is None,
  1098. which gives each value a weight of 1.0
  1099. Returns
  1100. -------
  1101. dice : double
  1102. The Dice dissimilarity between 1-D arrays `u` and `v`.
  1103. Examples
  1104. --------
  1105. >>> from scipy.spatial import distance
  1106. >>> distance.dice([1, 0, 0], [0, 1, 0])
  1107. 1.0
  1108. >>> distance.dice([1, 0, 0], [1, 1, 0])
  1109. 0.3333333333333333
  1110. >>> distance.dice([1, 0, 0], [2, 0, 0])
  1111. -0.3333333333333333
  1112. """
  1113. u = _validate_vector(u)
  1114. v = _validate_vector(v)
  1115. if w is not None:
  1116. w = _validate_weights(w)
  1117. if u.dtype == v.dtype == bool and w is None:
  1118. ntt = (u & v).sum()
  1119. else:
  1120. dtype = np.find_common_type([int], [u.dtype, v.dtype])
  1121. u = u.astype(dtype)
  1122. v = v.astype(dtype)
  1123. if w is None:
  1124. ntt = (u * v).sum()
  1125. else:
  1126. ntt = (u * v * w).sum()
  1127. (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
  1128. return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
  1129. def rogerstanimoto(u, v, w=None):
  1130. """
  1131. Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.
  1132. The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
  1133. `u` and `v`, is defined as
  1134. .. math::
  1135. \\frac{R}
  1136. {c_{TT} + c_{FF} + R}
  1137. where :math:`c_{ij}` is the number of occurrences of
  1138. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  1139. :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
  1140. Parameters
  1141. ----------
  1142. u : (N,) array_like, bool
  1143. Input array.
  1144. v : (N,) array_like, bool
  1145. Input array.
  1146. w : (N,) array_like, optional
  1147. The weights for each value in `u` and `v`. Default is None,
  1148. which gives each value a weight of 1.0
  1149. Returns
  1150. -------
  1151. rogerstanimoto : double
  1152. The Rogers-Tanimoto dissimilarity between vectors
  1153. `u` and `v`.
  1154. Examples
  1155. --------
  1156. >>> from scipy.spatial import distance
  1157. >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
  1158. 0.8
  1159. >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
  1160. 0.5
  1161. >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0])
  1162. -1.0
  1163. """
  1164. u = _validate_vector(u)
  1165. v = _validate_vector(v)
  1166. if w is not None:
  1167. w = _validate_weights(w)
  1168. (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
  1169. return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
  1170. def russellrao(u, v, w=None):
  1171. """
  1172. Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.
  1173. The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
  1174. `v`, is defined as
  1175. .. math::
  1176. \\frac{n - c_{TT}}
  1177. {n}
  1178. where :math:`c_{ij}` is the number of occurrences of
  1179. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  1180. :math:`k < n`.
  1181. Parameters
  1182. ----------
  1183. u : (N,) array_like, bool
  1184. Input array.
  1185. v : (N,) array_like, bool
  1186. Input array.
  1187. w : (N,) array_like, optional
  1188. The weights for each value in `u` and `v`. Default is None,
  1189. which gives each value a weight of 1.0
  1190. Returns
  1191. -------
  1192. russellrao : double
  1193. The Russell-Rao dissimilarity between vectors `u` and `v`.
  1194. Examples
  1195. --------
  1196. >>> from scipy.spatial import distance
  1197. >>> distance.russellrao([1, 0, 0], [0, 1, 0])
  1198. 1.0
  1199. >>> distance.russellrao([1, 0, 0], [1, 1, 0])
  1200. 0.6666666666666666
  1201. >>> distance.russellrao([1, 0, 0], [2, 0, 0])
  1202. 0.3333333333333333
  1203. """
  1204. u = _validate_vector(u)
  1205. v = _validate_vector(v)
  1206. if u.dtype == v.dtype == bool and w is None:
  1207. ntt = (u & v).sum()
  1208. n = float(len(u))
  1209. elif w is None:
  1210. ntt = (u * v).sum()
  1211. n = float(len(u))
  1212. else:
  1213. w = _validate_weights(w)
  1214. ntt = (u * v * w).sum()
  1215. n = w.sum()
  1216. return float(n - ntt) / n
  1217. def sokalmichener(u, v, w=None):
  1218. """
  1219. Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.
  1220. The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
  1221. is defined as
  1222. .. math::
  1223. \\frac{R}
  1224. {S + R}
  1225. where :math:`c_{ij}` is the number of occurrences of
  1226. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  1227. :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
  1228. :math:`S = c_{FF} + c_{TT}`.
  1229. Parameters
  1230. ----------
  1231. u : (N,) array_like, bool
  1232. Input array.
  1233. v : (N,) array_like, bool
  1234. Input array.
  1235. w : (N,) array_like, optional
  1236. The weights for each value in `u` and `v`. Default is None,
  1237. which gives each value a weight of 1.0
  1238. Returns
  1239. -------
  1240. sokalmichener : double
  1241. The Sokal-Michener dissimilarity between vectors `u` and `v`.
  1242. Examples
  1243. --------
  1244. >>> from scipy.spatial import distance
  1245. >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
  1246. 0.8
  1247. >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
  1248. 0.5
  1249. >>> distance.sokalmichener([1, 0, 0], [2, 0, 0])
  1250. -1.0
  1251. """
  1252. u = _validate_vector(u)
  1253. v = _validate_vector(v)
  1254. if u.dtype == v.dtype == bool and w is None:
  1255. ntt = (u & v).sum()
  1256. nff = (~u & ~v).sum()
  1257. elif w is None:
  1258. ntt = (u * v).sum()
  1259. nff = ((1.0 - u) * (1.0 - v)).sum()
  1260. else:
  1261. w = _validate_weights(w)
  1262. ntt = (u * v * w).sum()
  1263. nff = ((1.0 - u) * (1.0 - v) * w).sum()
  1264. (nft, ntf) = _nbool_correspond_ft_tf(u, v)
  1265. return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
  1266. def sokalsneath(u, v, w=None):
  1267. """
  1268. Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.
  1269. The Sokal-Sneath dissimilarity between `u` and `v`,
  1270. .. math::
  1271. \\frac{R}
  1272. {c_{TT} + R}
  1273. where :math:`c_{ij}` is the number of occurrences of
  1274. :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
  1275. :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
  1276. Parameters
  1277. ----------
  1278. u : (N,) array_like, bool
  1279. Input array.
  1280. v : (N,) array_like, bool
  1281. Input array.
  1282. w : (N,) array_like, optional
  1283. The weights for each value in `u` and `v`. Default is None,
  1284. which gives each value a weight of 1.0
  1285. Returns
  1286. -------
  1287. sokalsneath : double
  1288. The Sokal-Sneath dissimilarity between vectors `u` and `v`.
  1289. Examples
  1290. --------
  1291. >>> from scipy.spatial import distance
  1292. >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
  1293. 1.0
  1294. >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
  1295. 0.66666666666666663
  1296. >>> distance.sokalsneath([1, 0, 0], [2, 1, 0])
  1297. 0.0
  1298. >>> distance.sokalsneath([1, 0, 0], [3, 1, 0])
  1299. -2.0
  1300. """
  1301. u = _validate_vector(u)
  1302. v = _validate_vector(v)
  1303. if u.dtype == v.dtype == bool and w is None:
  1304. ntt = (u & v).sum()
  1305. elif w is None:
  1306. ntt = (u * v).sum()
  1307. else:
  1308. w = _validate_weights(w)
  1309. ntt = (u * v * w).sum()
  1310. (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
  1311. denom = np.array(ntt + 2.0 * (ntf + nft))
  1312. if not denom.any():
  1313. raise ValueError('Sokal-Sneath dissimilarity is not defined for '
  1314. 'vectors that are entirely false.')
  1315. return float(2.0 * (ntf + nft)) / denom
# Type converters used when preparing input arrays for the C wrappers.
_convert_to_double = partial(_convert_to_type, out_type=np.double)
_convert_to_bool = partial(_convert_to_type, out_type=bool)

# adding python-only wrappers to _distance_wrap module
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap

# Registry of implemented metrics:
# Dictionary with the following structure:
# {
#  metric_name : MetricInfo(aka, types=[double], validator=None)
# }
#
# Where:
# `metric_name` must be equal to python metric name
#
# MetricInfo is a named tuple with fields:
#  'aka' : [list of aliases],
#
#  'validator': f(X, m, n, **kwargs)    # function that check kwargs and
#                                       # computes default values.
#
#  'types': [list of supported types],  # X (pdist) and XA (cdist) are used to
#                                       # choose the type. if there is no match
#                                       # the first type is used. Default double
# }
# Defaults (applied right-to-left): types=['double'], validator=None.
MetricInfo = namedtuple("MetricInfo", 'aka types validator ')
MetricInfo.__new__.__defaults__ = (['double'], None)

_METRICS = {
    'braycurtis': MetricInfo(aka=['braycurtis']),
    'canberra': MetricInfo(aka=['canberra']),
    'chebyshev': MetricInfo(aka=['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']),
    'cityblock': MetricInfo(aka=['cityblock', 'cblock', 'cb', 'c']),
    'correlation': MetricInfo(aka=['correlation', 'co']),
    'cosine': MetricInfo(aka=['cosine', 'cos']),
    'dice': MetricInfo(aka=['dice'], types=['bool']),
    'euclidean': MetricInfo(aka=['euclidean', 'euclid', 'eu', 'e']),
    'hamming': MetricInfo(aka=['matching', 'hamming', 'hamm', 'ha', 'h'],
                          types=['double', 'bool']),
    'jaccard': MetricInfo(aka=['jaccard', 'jacc', 'ja', 'j'],
                          types=['double', 'bool']),
    'jensenshannon': MetricInfo(aka=['jensenshannon', 'js'],
                                types=['double']),
    'kulsinski': MetricInfo(aka=['kulsinski'], types=['bool']),
    'mahalanobis': MetricInfo(aka=['mahalanobis', 'mahal', 'mah'],
                              validator=_validate_mahalanobis_kwargs),
    'minkowski': MetricInfo(aka=['minkowski', 'mi', 'm', 'pnorm'],
                            validator=_validate_minkowski_kwargs),
    'rogerstanimoto': MetricInfo(aka=['rogerstanimoto'], types=['bool']),
    'russellrao': MetricInfo(aka=['russellrao'], types=['bool']),
    'seuclidean': MetricInfo(aka=['seuclidean', 'se', 's'],
                             validator=_validate_seuclidean_kwargs),
    'sokalmichener': MetricInfo(aka=['sokalmichener'], types=['bool']),
    'sokalsneath': MetricInfo(aka=['sokalsneath'], types=['bool']),
    'sqeuclidean': MetricInfo(aka=['sqeuclidean', 'sqe', 'sqeuclid']),
    'wminkowski': MetricInfo(aka=['wminkowski', 'wmi', 'wm', 'wpnorm'],
                             validator=_validate_wminkowski_kwargs),
    'yule': MetricInfo(aka=['yule'], types=['bool']),
}

# Flat alias -> canonical-name lookup derived from the registry above.
_METRIC_ALIAS = dict((alias, name)
                     for name, info in _METRICS.items()
                     for alias in info.aka)

_METRICS_NAMES = list(_METRICS.keys())

# 'test_<name>' entries map to the pure-Python implementations; used by
# pdist/cdist to fall back from the C wrappers (e.g. for weighted calls).
_TEST_METRICS = {'test_' + name: globals()[name] for name in _METRICS.keys()}
  1378. def _select_weighted_metric(mstr, kwargs, out):
  1379. kwargs = dict(kwargs)
  1380. if "w" in kwargs and kwargs["w"] is None:
  1381. # w=None is the same as omitting it
  1382. kwargs.pop("w")
  1383. if mstr.startswith("test_") or mstr in _METRICS['wminkowski'].aka:
  1384. # These support weights
  1385. pass
  1386. elif "w" in kwargs:
  1387. if (mstr in _METRICS['seuclidean'].aka or
  1388. mstr in _METRICS['mahalanobis'].aka):
  1389. raise ValueError("metric %s incompatible with weights" % mstr)
  1390. # XXX: C-versions do not support weights
  1391. # need to use python version for weighting
  1392. kwargs['out'] = out
  1393. mstr = "test_%s" % mstr
  1394. return mstr, kwargs
  1395. def pdist(X, metric='euclidean', *args, **kwargs):
  1396. """
  1397. Pairwise distances between observations in n-dimensional space.
  1398. See Notes for common calling conventions.
  1399. Parameters
  1400. ----------
  1401. X : ndarray
  1402. An m by n array of m original observations in an
  1403. n-dimensional space.
  1404. metric : str or function, optional
  1405. The distance metric to use. The distance function can
  1406. be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
  1407. 'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
  1408. 'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching',
  1409. 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
  1410. 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
  1411. *args : tuple. Deprecated.
  1412. Additional arguments should be passed as keyword arguments
  1413. **kwargs : dict, optional
  1414. Extra arguments to `metric`: refer to each metric documentation for a
  1415. list of all possible arguments.
  1416. Some possible arguments:
  1417. p : scalar
  1418. The p-norm to apply for Minkowski, weighted and unweighted.
  1419. Default: 2.
  1420. w : ndarray
  1421. The weight vector for metrics that support weights (e.g., Minkowski).
  1422. V : ndarray
  1423. The variance vector for standardized Euclidean.
  1424. Default: var(X, axis=0, ddof=1)
  1425. VI : ndarray
  1426. The inverse of the covariance matrix for Mahalanobis.
  1427. Default: inv(cov(X.T)).T
  1428. out : ndarray.
  1429. The output array
  1430. If not None, condensed distance matrix Y is stored in this array.
  1431. Note: metric independent, it will become a regular keyword arg in a
  1432. future scipy version
  1433. Returns
  1434. -------
  1435. Y : ndarray
  1436. Returns a condensed distance matrix Y. For
  1437. each :math:`i` and :math:`j` (where :math:`i<j<m`),where m is the number
  1438. of original observations. The metric ``dist(u=X[i], v=X[j])``
  1439. is computed and stored in entry ``ij``.
  1440. See Also
  1441. --------
  1442. squareform : converts between condensed distance matrices and
  1443. square distance matrices.
  1444. Notes
  1445. -----
  1446. See ``squareform`` for information on how to calculate the index of
  1447. this entry or to convert the condensed distance matrix to a
  1448. redundant square matrix.
  1449. The following are common calling conventions.
  1450. 1. ``Y = pdist(X, 'euclidean')``
  1451. Computes the distance between m points using Euclidean distance
  1452. (2-norm) as the distance metric between the points. The points
  1453. are arranged as m n-dimensional row vectors in the matrix X.
  1454. 2. ``Y = pdist(X, 'minkowski', p=2.)``
  1455. Computes the distances using the Minkowski distance
  1456. :math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
  1457. 3. ``Y = pdist(X, 'cityblock')``
  1458. Computes the city block or Manhattan distance between the
  1459. points.
  1460. 4. ``Y = pdist(X, 'seuclidean', V=None)``
  1461. Computes the standardized Euclidean distance. The standardized
  1462. Euclidean distance between two n-vectors ``u`` and ``v`` is
  1463. .. math::
  1464. \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
  1465. V is the variance vector; V[i] is the variance computed over all
  1466. the i'th components of the points. If not passed, it is
  1467. automatically computed.
  1468. 5. ``Y = pdist(X, 'sqeuclidean')``
  1469. Computes the squared Euclidean distance :math:`||u-v||_2^2` between
  1470. the vectors.
  1471. 6. ``Y = pdist(X, 'cosine')``
  1472. Computes the cosine distance between vectors u and v,
  1473. .. math::
  1474. 1 - \\frac{u \\cdot v}
  1475. {{||u||}_2 {||v||}_2}
  1476. where :math:`||*||_2` is the 2-norm of its argument ``*``, and
  1477. :math:`u \\cdot v` is the dot product of ``u`` and ``v``.
  1478. 7. ``Y = pdist(X, 'correlation')``
  1479. Computes the correlation distance between vectors u and v. This is
  1480. .. math::
  1481. 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
  1482. {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
  1483. where :math:`\\bar{v}` is the mean of the elements of vector v,
  1484. and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
  1485. 8. ``Y = pdist(X, 'hamming')``
  1486. Computes the normalized Hamming distance, or the proportion of
  1487. those vector elements between two n-vectors ``u`` and ``v``
  1488. which disagree. To save memory, the matrix ``X`` can be of type
  1489. boolean.
  1490. 9. ``Y = pdist(X, 'jaccard')``
  1491. Computes the Jaccard distance between the points. Given two
  1492. vectors, ``u`` and ``v``, the Jaccard distance is the
  1493. proportion of those elements ``u[i]`` and ``v[i]`` that
  1494. disagree.
  1495. 10. ``Y = pdist(X, 'chebyshev')``
  1496. Computes the Chebyshev distance between the points. The
  1497. Chebyshev distance between two n-vectors ``u`` and ``v`` is the
  1498. maximum norm-1 distance between their respective elements. More
  1499. precisely, the distance is given by
  1500. .. math::
  1501. d(u,v) = \\max_i {|u_i-v_i|}
  1502. 11. ``Y = pdist(X, 'canberra')``
  1503. Computes the Canberra distance between the points. The
  1504. Canberra distance between two points ``u`` and ``v`` is
  1505. .. math::
  1506. d(u,v) = \\sum_i \\frac{|u_i-v_i|}
  1507. {|u_i|+|v_i|}
  1508. 12. ``Y = pdist(X, 'braycurtis')``
  1509. Computes the Bray-Curtis distance between the points. The
  1510. Bray-Curtis distance between two points ``u`` and ``v`` is
  1511. .. math::
  1512. d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
  1513. {\\sum_i {|u_i+v_i|}}
  1514. 13. ``Y = pdist(X, 'mahalanobis', VI=None)``
  1515. Computes the Mahalanobis distance between the points. The
  1516. Mahalanobis distance between two points ``u`` and ``v`` is
  1517. :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
  1518. variable) is the inverse covariance. If ``VI`` is not None,
  1519. ``VI`` will be used as the inverse covariance matrix.
  1520. 14. ``Y = pdist(X, 'yule')``
  1521. Computes the Yule distance between each pair of boolean
  1522. vectors. (see yule function documentation)
  1523. 15. ``Y = pdist(X, 'matching')``
  1524. Synonym for 'hamming'.
  1525. 16. ``Y = pdist(X, 'dice')``
  1526. Computes the Dice distance between each pair of boolean
  1527. vectors. (see dice function documentation)
  1528. 17. ``Y = pdist(X, 'kulsinski')``
  1529. Computes the Kulsinski distance between each pair of
  1530. boolean vectors. (see kulsinski function documentation)
  1531. 18. ``Y = pdist(X, 'rogerstanimoto')``
  1532. Computes the Rogers-Tanimoto distance between each pair of
  1533. boolean vectors. (see rogerstanimoto function documentation)
  1534. 19. ``Y = pdist(X, 'russellrao')``
  1535. Computes the Russell-Rao distance between each pair of
  1536. boolean vectors. (see russellrao function documentation)
  1537. 20. ``Y = pdist(X, 'sokalmichener')``
  1538. Computes the Sokal-Michener distance between each pair of
  1539. boolean vectors. (see sokalmichener function documentation)
  1540. 21. ``Y = pdist(X, 'sokalsneath')``
  1541. Computes the Sokal-Sneath distance between each pair of
  1542. boolean vectors. (see sokalsneath function documentation)
  1543. 22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``
  1544. Computes the weighted Minkowski distance between each pair of
  1545. vectors. (see wminkowski function documentation)
  1546. 23. ``Y = pdist(X, f)``
  1547. Computes the distance between all pairs of vectors in X
  1548. using the user supplied 2-arity function f. For example,
  1549. Euclidean distance between the vectors could be computed
  1550. as follows::
  1551. dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
  1552. Note that you should avoid passing a reference to one of
  1553. the distance functions defined in this library. For example,::
  1554. dm = pdist(X, sokalsneath)
  1555. would calculate the pair-wise distances between the vectors in
  1556. X using the Python function sokalsneath. This would result in
  1557. sokalsneath being called :math:`{n \\choose 2}` times, which
  1558. is inefficient. Instead, the optimized C version is more
  1559. efficient, and we call it using the following syntax.::
  1560. dm = pdist(X, 'sokalsneath')
  1561. """
  1562. # You can also call this as:
  1563. # Y = pdist(X, 'test_abc')
  1564. # where 'abc' is the metric being tested. This computes the distance
  1565. # between all pairs of vectors in X using the distance metric 'abc' but
  1566. # with a more succinct, verifiable, but less efficient implementation.
  1567. X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
  1568. check_finite=False)
  1569. kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist")
  1570. X = np.asarray(X, order='c')
  1571. s = X.shape
  1572. if len(s) != 2:
  1573. raise ValueError('A 2-dimensional array must be passed.')
  1574. m, n = s
  1575. out = kwargs.pop("out", None)
  1576. if out is None:
  1577. dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
  1578. else:
  1579. if out.shape != (m * (m - 1) // 2,):
  1580. raise ValueError("output array has incorrect shape.")
  1581. if not out.flags.c_contiguous:
  1582. raise ValueError("Output array must be C-contiguous.")
  1583. if out.dtype != np.double:
  1584. raise ValueError("Output array must be double type.")
  1585. dm = out
  1586. # compute blacklist for deprecated kwargs
  1587. if(metric in _METRICS['jensenshannon'].aka
  1588. or metric == 'test_jensenshannon' or metric == jensenshannon):
  1589. kwargs_blacklist = ["p", "w", "V", "VI"]
  1590. elif(metric in _METRICS['minkowski'].aka
  1591. or metric in _METRICS['wminkowski'].aka
  1592. or metric in ['test_minkowski', 'test_wminkowski']
  1593. or metric in [minkowski, wminkowski]):
  1594. kwargs_blacklist = ["V", "VI"]
  1595. elif(metric in _METRICS['seuclidean'].aka or
  1596. metric == 'test_seuclidean' or metric == seuclidean):
  1597. kwargs_blacklist = ["p", "w", "VI"]
  1598. elif(metric in _METRICS['mahalanobis'].aka
  1599. or metric == 'test_mahalanobis' or metric == mahalanobis):
  1600. kwargs_blacklist = ["p", "w", "V"]
  1601. else:
  1602. kwargs_blacklist = ["p", "V", "VI"]
  1603. _filter_deprecated_kwargs(kwargs, kwargs_blacklist)
  1604. if callable(metric):
  1605. mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
  1606. metric_name = _METRIC_ALIAS.get(mstr, None)
  1607. if metric_name is not None:
  1608. X, typ, kwargs = _validate_pdist_input(X, m, n,
  1609. metric_name, **kwargs)
  1610. k = 0
  1611. for i in xrange(0, m - 1):
  1612. for j in xrange(i + 1, m):
  1613. dm[k] = metric(X[i], X[j], **kwargs)
  1614. k = k + 1
  1615. elif isinstance(metric, string_types):
  1616. mstr = metric.lower()
  1617. mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
  1618. metric_name = _METRIC_ALIAS.get(mstr, None)
  1619. if metric_name is not None:
  1620. X, typ, kwargs = _validate_pdist_input(X, m, n,
  1621. metric_name, **kwargs)
  1622. # get pdist wrapper
  1623. pdist_fn = getattr(_distance_wrap,
  1624. "pdist_%s_%s_wrap" % (metric_name, typ))
  1625. pdist_fn(X, dm, **kwargs)
  1626. return dm
  1627. elif mstr in ['old_cosine', 'old_cos']:
  1628. warnings.warn('"old_cosine" is deprecated and will be removed in '
  1629. 'a future version. Use "cosine" instead.',
  1630. DeprecationWarning)
  1631. X = _convert_to_double(X)
  1632. norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
  1633. np.sqrt(norms, out=norms)
  1634. nV = norms.reshape(m, 1)
  1635. # The numerator u * v
  1636. nm = np.dot(X, X.T)
  1637. # The denom. ||u||*||v||
  1638. de = np.dot(nV, nV.T)
  1639. dm = 1.0 - (nm / de)
  1640. dm[xrange(0, m), xrange(0, m)] = 0.0
  1641. dm = squareform(dm)
  1642. elif mstr.startswith("test_"):
  1643. if mstr in _TEST_METRICS:
  1644. dm = pdist(X, _TEST_METRICS[mstr], **kwargs)
  1645. else:
  1646. raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
  1647. else:
  1648. raise ValueError('Unknown Distance Metric: %s' % mstr)
  1649. else:
  1650. raise TypeError('2nd argument metric must be a string identifier '
  1651. 'or a function.')
  1652. return dm
def squareform(X, force="no", checks=True):
    """
    Convert a vector-form distance vector to a square-form distance
    matrix, and vice-versa.

    Parameters
    ----------
    X : ndarray
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored any way so they do not disrupt the
        squareform transformation.

    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.

    Notes
    -----
    1. ``v = squareform(X)``

       Given a square d-by-d symmetric distance matrix X,
       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
       :math:`{n \\choose 2}`) sized vector v.

       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` is the distance
       between points i and j. If X is non-square or asymmetric, an error
       is returned.

    2. ``X = squareform(v)``

       Given a ``d*(d-1)/2`` sized v for some integer ``d >= 2`` encoding
       distances as described, ``X = squareform(v)`` returns a d by d distance
       matrix X.  The ``X[i, j]`` and ``X[j, i]`` values are set to
       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all
       diagonal elements are zero.

    In Scipy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.
    """
    X = np.ascontiguousarray(X)

    s = X.shape

    # 'force' only narrows which dimensionalities are acceptable; the actual
    # direction of the conversion is still decided by the input's ndim below.
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")

    # X = squareform(v): condensed vector -> redundant square matrix.
    if len(s) == 1:
        if s[0] == 0:
            # An empty condensed matrix corresponds to a single observation.
            return np.zeros((1, 1), dtype=X.dtype)

        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))

        # Check that v is of valid dimensions (length must be d choose 2).
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')

        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)

        # Since the C code does not support striding using strides,
        # a contiguous copy is made if X is a strided view of another array.
        X = _copy_array_if_base_present(X)

        # Fill in the values of the distance matrix (C helper writes both
        # triangles; the diagonal stays at the zeros set above).
        M = np.zeros((d, d), dtype=X.dtype) if False else M  # no-op guard removed? keep original flow
        _distance_wrap.to_squareform_from_vector_wrap(M, X)

        # Return the distance matrix.
        return M
    elif len(s) == 2:
        # X = squareform(M): redundant square matrix -> condensed vector.
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            # Raises if X is asymmetric or has a nonzero diagonal.
            is_valid_dm(X, throw=True, name='X')

        # One-side of the dimensions is set here.
        d = s[0]

        if d <= 1:
            # 0x0 or 1x1 matrix: no off-diagonal entries to collect.
            return np.array([], dtype=X.dtype)

        # Create a vector sized d choose 2.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)

        # Since the C code does not support striding using strides,
        # a contiguous copy is made if X is a strided view of another array.
        X = _copy_array_if_base_present(X)

        # Convert the matrix to the condensed vector form.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError(('The first argument must be one or two dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
  1747. def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
  1748. """
  1749. Return True if input array is a valid distance matrix.
  1750. Distance matrices must be 2-dimensional numpy arrays.
  1751. They must have a zero-diagonal, and they must be symmetric.
  1752. Parameters
  1753. ----------
  1754. D : ndarray
  1755. The candidate object to test for validity.
  1756. tol : float, optional
  1757. The distance matrix should be symmetric. `tol` is the maximum
  1758. difference between entries ``ij`` and ``ji`` for the distance
  1759. metric to be considered symmetric.
  1760. throw : bool, optional
  1761. An exception is thrown if the distance matrix passed is not valid.
  1762. name : str, optional
  1763. The name of the variable to checked. This is useful if
  1764. throw is set to True so the offending variable can be identified
  1765. in the exception message when an exception is thrown.
  1766. warning : bool, optional
  1767. Instead of throwing an exception, a warning message is
  1768. raised.
  1769. Returns
  1770. -------
  1771. valid : bool
  1772. True if the variable `D` passed is a valid distance matrix.
  1773. Notes
  1774. -----
  1775. Small numerical differences in `D` and `D.T` and non-zeroness of
  1776. the diagonal are ignored if they are within the tolerance specified
  1777. by `tol`.
  1778. """
  1779. D = np.asarray(D, order='c')
  1780. valid = True
  1781. try:
  1782. s = D.shape
  1783. if len(D.shape) != 2:
  1784. if name:
  1785. raise ValueError(('Distance matrix \'%s\' must have shape=2 '
  1786. '(i.e. be two-dimensional).') % name)
  1787. else:
  1788. raise ValueError('Distance matrix must have shape=2 (i.e. '
  1789. 'be two-dimensional).')
  1790. if tol == 0.0:
  1791. if not (D == D.T).all():
  1792. if name:
  1793. raise ValueError(('Distance matrix \'%s\' must be '
  1794. 'symmetric.') % name)
  1795. else:
  1796. raise ValueError('Distance matrix must be symmetric.')
  1797. if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
  1798. if name:
  1799. raise ValueError(('Distance matrix \'%s\' diagonal must '
  1800. 'be zero.') % name)
  1801. else:
  1802. raise ValueError('Distance matrix diagonal must be zero.')
  1803. else:
  1804. if not (D - D.T <= tol).all():
  1805. if name:
  1806. raise ValueError(('Distance matrix \'%s\' must be '
  1807. 'symmetric within tolerance %5.5f.')
  1808. % (name, tol))
  1809. else:
  1810. raise ValueError('Distance matrix must be symmetric within'
  1811. ' tolerance %5.5f.' % tol)
  1812. if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
  1813. if name:
  1814. raise ValueError(('Distance matrix \'%s\' diagonal must be'
  1815. ' close to zero within tolerance %5.5f.')
  1816. % (name, tol))
  1817. else:
  1818. raise ValueError(('Distance matrix \'%s\' diagonal must be'
  1819. ' close to zero within tolerance %5.5f.')
  1820. % tol)
  1821. except Exception as e:
  1822. if throw:
  1823. raise
  1824. if warning:
  1825. warnings.warn(str(e))
  1826. valid = False
  1827. return valid
  1828. def is_valid_y(y, warning=False, throw=False, name=None):
  1829. """
  1830. Return True if the input array is a valid condensed distance matrix.
  1831. Condensed distance matrices must be 1-dimensional numpy arrays.
  1832. Their length must be a binomial coefficient :math:`{n \\choose 2}`
  1833. for some positive integer n.
  1834. Parameters
  1835. ----------
  1836. y : ndarray
  1837. The condensed distance matrix.
  1838. warning : bool, optional
  1839. Invokes a warning if the variable passed is not a valid
  1840. condensed distance matrix. The warning message explains why
  1841. the distance matrix is not valid. `name` is used when
  1842. referencing the offending variable.
  1843. throw : bool, optional
  1844. Throws an exception if the variable passed is not a valid
  1845. condensed distance matrix.
  1846. name : bool, optional
  1847. Used when referencing the offending variable in the
  1848. warning or exception message.
  1849. """
  1850. y = np.asarray(y, order='c')
  1851. valid = True
  1852. try:
  1853. if len(y.shape) != 1:
  1854. if name:
  1855. raise ValueError(('Condensed distance matrix \'%s\' must '
  1856. 'have shape=1 (i.e. be one-dimensional).')
  1857. % name)
  1858. else:
  1859. raise ValueError('Condensed distance matrix must have shape=1 '
  1860. '(i.e. be one-dimensional).')
  1861. n = y.shape[0]
  1862. d = int(np.ceil(np.sqrt(n * 2)))
  1863. if (d * (d - 1) / 2) != n:
  1864. if name:
  1865. raise ValueError(('Length n of condensed distance matrix '
  1866. '\'%s\' must be a binomial coefficient, i.e.'
  1867. 'there must be a k such that '
  1868. '(k \\choose 2)=n)!') % name)
  1869. else:
  1870. raise ValueError('Length n of condensed distance matrix must '
  1871. 'be a binomial coefficient, i.e. there must '
  1872. 'be a k such that (k \\choose 2)=n)!')
  1873. except Exception as e:
  1874. if throw:
  1875. raise
  1876. if warning:
  1877. warnings.warn(str(e))
  1878. valid = False
  1879. return valid
  1880. def num_obs_dm(d):
  1881. """
  1882. Return the number of original observations that correspond to a
  1883. square, redundant distance matrix.
  1884. Parameters
  1885. ----------
  1886. d : ndarray
  1887. The target distance matrix.
  1888. Returns
  1889. -------
  1890. num_obs_dm : int
  1891. The number of observations in the redundant distance matrix.
  1892. """
  1893. d = np.asarray(d, order='c')
  1894. is_valid_dm(d, tol=np.inf, throw=True, name='d')
  1895. return d.shape[0]
  1896. def num_obs_y(Y):
  1897. """
  1898. Return the number of original observations that correspond to a
  1899. condensed distance matrix.
  1900. Parameters
  1901. ----------
  1902. Y : ndarray
  1903. Condensed distance matrix.
  1904. Returns
  1905. -------
  1906. n : int
  1907. The number of observations in the condensed distance matrix `Y`.
  1908. """
  1909. Y = np.asarray(Y, order='c')
  1910. is_valid_y(Y, throw=True, name='Y')
  1911. k = Y.shape[0]
  1912. if k == 0:
  1913. raise ValueError("The number of observations cannot be determined on "
  1914. "an empty distance matrix.")
  1915. d = int(np.ceil(np.sqrt(k * 2)))
  1916. if (d * (d - 1) / 2) != k:
  1917. raise ValueError("Invalid condensed distance matrix passed. Must be "
  1918. "some k where k=(n choose 2) for some n >= 2.")
  1919. return d
  1920. def cdist(XA, XB, metric='euclidean', *args, **kwargs):
  1921. """
  1922. Compute distance between each pair of the two collections of inputs.
  1923. See Notes for common calling conventions.
  1924. Parameters
  1925. ----------
  1926. XA : ndarray
  1927. An :math:`m_A` by :math:`n` array of :math:`m_A`
  1928. original observations in an :math:`n`-dimensional space.
  1929. Inputs are converted to float type.
  1930. XB : ndarray
  1931. An :math:`m_B` by :math:`n` array of :math:`m_B`
  1932. original observations in an :math:`n`-dimensional space.
  1933. Inputs are converted to float type.
  1934. metric : str or callable, optional
  1935. The distance metric to use. If a string, the distance function can be
  1936. 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
  1937. 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',
  1938. 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
  1939. 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
  1940. 'wminkowski', 'yule'.
  1941. *args : tuple. Deprecated.
  1942. Additional arguments should be passed as keyword arguments
  1943. **kwargs : dict, optional
  1944. Extra arguments to `metric`: refer to each metric documentation for a
  1945. list of all possible arguments.
  1946. Some possible arguments:
  1947. p : scalar
  1948. The p-norm to apply for Minkowski, weighted and unweighted.
  1949. Default: 2.
  1950. w : ndarray
  1951. The weight vector for metrics that support weights (e.g., Minkowski).
  1952. V : ndarray
  1953. The variance vector for standardized Euclidean.
  1954. Default: var(vstack([XA, XB]), axis=0, ddof=1)
  1955. VI : ndarray
  1956. The inverse of the covariance matrix for Mahalanobis.
  1957. Default: inv(cov(vstack([XA, XB].T))).T
  1958. out : ndarray
  1959. The output array
  1960. If not None, the distance matrix Y is stored in this array.
  1961. Note: metric independent, it will become a regular keyword arg in a
  1962. future scipy version
  1963. Returns
  1964. -------
  1965. Y : ndarray
  1966. A :math:`m_A` by :math:`m_B` distance matrix is returned.
  1967. For each :math:`i` and :math:`j`, the metric
  1968. ``dist(u=XA[i], v=XB[j])`` is computed and stored in the
  1969. :math:`ij` th entry.
  1970. Raises
  1971. ------
  1972. ValueError
  1973. An exception is thrown if `XA` and `XB` do not have
  1974. the same number of columns.
  1975. Notes
  1976. -----
  1977. The following are common calling conventions:
  1978. 1. ``Y = cdist(XA, XB, 'euclidean')``
  1979. Computes the distance between :math:`m` points using
  1980. Euclidean distance (2-norm) as the distance metric between the
  1981. points. The points are arranged as :math:`m`
  1982. :math:`n`-dimensional row vectors in the matrix X.
  1983. 2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``
  1984. Computes the distances using the Minkowski distance
  1985. :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
  1986. 3. ``Y = cdist(XA, XB, 'cityblock')``
  1987. Computes the city block or Manhattan distance between the
  1988. points.
  1989. 4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
  1990. Computes the standardized Euclidean distance. The standardized
  1991. Euclidean distance between two n-vectors ``u`` and ``v`` is
  1992. .. math::
  1993. \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
  1994. V is the variance vector; V[i] is the variance computed over all
  1995. the i'th components of the points. If not passed, it is
  1996. automatically computed.
  1997. 5. ``Y = cdist(XA, XB, 'sqeuclidean')``
  1998. Computes the squared Euclidean distance :math:`||u-v||_2^2` between
  1999. the vectors.
  2000. 6. ``Y = cdist(XA, XB, 'cosine')``
  2001. Computes the cosine distance between vectors u and v,
  2002. .. math::
  2003. 1 - \\frac{u \\cdot v}
  2004. {{||u||}_2 {||v||}_2}
  2005. where :math:`||*||_2` is the 2-norm of its argument ``*``, and
  2006. :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
  2007. 7. ``Y = cdist(XA, XB, 'correlation')``
  2008. Computes the correlation distance between vectors u and v. This is
  2009. .. math::
  2010. 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
  2011. {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
  2012. where :math:`\\bar{v}` is the mean of the elements of vector v,
  2013. and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
  2014. 8. ``Y = cdist(XA, XB, 'hamming')``
  2015. Computes the normalized Hamming distance, or the proportion of
  2016. those vector elements between two n-vectors ``u`` and ``v``
  2017. which disagree. To save memory, the matrix ``X`` can be of type
  2018. boolean.
  2019. 9. ``Y = cdist(XA, XB, 'jaccard')``
  2020. Computes the Jaccard distance between the points. Given two
  2021. vectors, ``u`` and ``v``, the Jaccard distance is the
  2022. proportion of those elements ``u[i]`` and ``v[i]`` that
  2023. disagree where at least one of them is non-zero.
  2024. 10. ``Y = cdist(XA, XB, 'chebyshev')``
  2025. Computes the Chebyshev distance between the points. The
  2026. Chebyshev distance between two n-vectors ``u`` and ``v`` is the
  2027. maximum norm-1 distance between their respective elements. More
  2028. precisely, the distance is given by
  2029. .. math::
  2030. d(u,v) = \\max_i {|u_i-v_i|}.
  2031. 11. ``Y = cdist(XA, XB, 'canberra')``
  2032. Computes the Canberra distance between the points. The
  2033. Canberra distance between two points ``u`` and ``v`` is
  2034. .. math::
  2035. d(u,v) = \\sum_i \\frac{|u_i-v_i|}
  2036. {|u_i|+|v_i|}.
  2037. 12. ``Y = cdist(XA, XB, 'braycurtis')``
  2038. Computes the Bray-Curtis distance between the points. The
  2039. Bray-Curtis distance between two points ``u`` and ``v`` is
  2040. .. math::
  2041. d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
  2042. {\\sum_i (|u_i+v_i|)}
  2043. 13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
  2044. Computes the Mahalanobis distance between the points. The
  2045. Mahalanobis distance between two points ``u`` and ``v`` is
  2046. :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
  2047. variable) is the inverse covariance. If ``VI`` is not None,
  2048. ``VI`` will be used as the inverse covariance matrix.
  2049. 14. ``Y = cdist(XA, XB, 'yule')``
  2050. Computes the Yule distance between the boolean
  2051. vectors. (see `yule` function documentation)
  2052. 15. ``Y = cdist(XA, XB, 'matching')``
  2053. Synonym for 'hamming'.
  2054. 16. ``Y = cdist(XA, XB, 'dice')``
  2055. Computes the Dice distance between the boolean vectors. (see
  2056. `dice` function documentation)
  2057. 17. ``Y = cdist(XA, XB, 'kulsinski')``
  2058. Computes the Kulsinski distance between the boolean
  2059. vectors. (see `kulsinski` function documentation)
  2060. 18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
  2061. Computes the Rogers-Tanimoto distance between the boolean
  2062. vectors. (see `rogerstanimoto` function documentation)
  2063. 19. ``Y = cdist(XA, XB, 'russellrao')``
  2064. Computes the Russell-Rao distance between the boolean
  2065. vectors. (see `russellrao` function documentation)
  2066. 20. ``Y = cdist(XA, XB, 'sokalmichener')``
  2067. Computes the Sokal-Michener distance between the boolean
  2068. vectors. (see `sokalmichener` function documentation)
  2069. 21. ``Y = cdist(XA, XB, 'sokalsneath')``
  2070. Computes the Sokal-Sneath distance between the vectors. (see
  2071. `sokalsneath` function documentation)
  2072. 22. ``Y = cdist(XA, XB, 'wminkowski', p=2., w=w)``
  2073. Computes the weighted Minkowski distance between the
  2074. vectors. (see `wminkowski` function documentation)
  2075. 23. ``Y = cdist(XA, XB, f)``
  2076. Computes the distance between all pairs of vectors in X
  2077. using the user supplied 2-arity function f. For example,
  2078. Euclidean distance between the vectors could be computed
  2079. as follows::
  2080. dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
  2081. Note that you should avoid passing a reference to one of
  2082. the distance functions defined in this library. For example,::
  2083. dm = cdist(XA, XB, sokalsneath)
  2084. would calculate the pair-wise distances between the vectors in
  2085. X using the Python function `sokalsneath`. This would result in
  2086. sokalsneath being called :math:`{n \\choose 2}` times, which
  2087. is inefficient. Instead, the optimized C version is more
  2088. efficient, and we call it using the following syntax::
  2089. dm = cdist(XA, XB, 'sokalsneath')
  2090. Examples
  2091. --------
  2092. Find the Euclidean distances between four 2-D coordinates:
  2093. >>> from scipy.spatial import distance
  2094. >>> coords = [(35.0456, -85.2672),
  2095. ... (35.1174, -89.9711),
  2096. ... (35.9728, -83.9422),
  2097. ... (36.1667, -86.7833)]
  2098. >>> distance.cdist(coords, coords, 'euclidean')
  2099. array([[ 0. , 4.7044, 1.6172, 1.8856],
  2100. [ 4.7044, 0. , 6.0893, 3.3561],
  2101. [ 1.6172, 6.0893, 0. , 2.8477],
  2102. [ 1.8856, 3.3561, 2.8477, 0. ]])
  2103. Find the Manhattan distance from a 3-D point to the corners of the unit
  2104. cube:
  2105. >>> a = np.array([[0, 0, 0],
  2106. ... [0, 0, 1],
  2107. ... [0, 1, 0],
  2108. ... [0, 1, 1],
  2109. ... [1, 0, 0],
  2110. ... [1, 0, 1],
  2111. ... [1, 1, 0],
  2112. ... [1, 1, 1]])
  2113. >>> b = np.array([[ 0.1, 0.2, 0.4]])
  2114. >>> distance.cdist(a, b, 'cityblock')
  2115. array([[ 0.7],
  2116. [ 0.9],
  2117. [ 1.3],
  2118. [ 1.5],
  2119. [ 1.5],
  2120. [ 1.7],
  2121. [ 2.1],
  2122. [ 2.3]])
  2123. """
  2124. # You can also call this as:
  2125. # Y = cdist(XA, XB, 'test_abc')
  2126. # where 'abc' is the metric being tested. This computes the distance
  2127. # between all pairs of vectors in XA and XB using the distance metric 'abc'
  2128. # but with a more succinct, verifiable, but less efficient implementation.
  2129. kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "cdist")
  2130. XA = np.asarray(XA, order='c')
  2131. XB = np.asarray(XB, order='c')
  2132. s = XA.shape
  2133. sB = XB.shape
  2134. if len(s) != 2:
  2135. raise ValueError('XA must be a 2-dimensional array.')
  2136. if len(sB) != 2:
  2137. raise ValueError('XB must be a 2-dimensional array.')
  2138. if s[1] != sB[1]:
  2139. raise ValueError('XA and XB must have the same number of columns '
  2140. '(i.e. feature dimension.)')
  2141. mA = s[0]
  2142. mB = sB[0]
  2143. n = s[1]
  2144. out = kwargs.pop("out", None)
  2145. if out is None:
  2146. dm = np.empty((mA, mB), dtype=np.double)
  2147. else:
  2148. if out.shape != (mA, mB):
  2149. raise ValueError("Output array has incorrect shape.")
  2150. if not out.flags.c_contiguous:
  2151. raise ValueError("Output array must be C-contiguous.")
  2152. if out.dtype != np.double:
  2153. raise ValueError("Output array must be double type.")
  2154. dm = out
  2155. # compute blacklist for deprecated kwargs
  2156. if(metric in _METRICS['minkowski'].aka or
  2157. metric in _METRICS['wminkowski'].aka or
  2158. metric in ['test_minkowski', 'test_wminkowski'] or
  2159. metric in [minkowski, wminkowski]):
  2160. kwargs_blacklist = ["V", "VI"]
  2161. elif(metric in _METRICS['seuclidean'].aka or
  2162. metric == 'test_seuclidean' or metric == seuclidean):
  2163. kwargs_blacklist = ["p", "w", "VI"]
  2164. elif(metric in _METRICS['mahalanobis'].aka or
  2165. metric == 'test_mahalanobis' or metric == mahalanobis):
  2166. kwargs_blacklist = ["p", "w", "V"]
  2167. else:
  2168. kwargs_blacklist = ["p", "V", "VI"]
  2169. _filter_deprecated_kwargs(kwargs, kwargs_blacklist)
  2170. if callable(metric):
  2171. mstr = getattr(metric, '__name__', 'Unknown')
  2172. metric_name = _METRIC_ALIAS.get(mstr, None)
  2173. XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
  2174. metric_name, **kwargs)
  2175. for i in xrange(0, mA):
  2176. for j in xrange(0, mB):
  2177. dm[i, j] = metric(XA[i], XB[j], **kwargs)
  2178. elif isinstance(metric, string_types):
  2179. mstr = metric.lower()
  2180. mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
  2181. metric_name = _METRIC_ALIAS.get(mstr, None)
  2182. if metric_name is not None:
  2183. XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
  2184. metric_name, **kwargs)
  2185. # get cdist wrapper
  2186. cdist_fn = getattr(_distance_wrap,
  2187. "cdist_%s_%s_wrap" % (metric_name, typ))
  2188. cdist_fn(XA, XB, dm, **kwargs)
  2189. return dm
  2190. elif mstr.startswith("test_"):
  2191. if mstr in _TEST_METRICS:
  2192. dm = cdist(XA, XB, _TEST_METRICS[mstr], **kwargs)
  2193. else:
  2194. raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
  2195. else:
  2196. raise ValueError('Unknown Distance Metric: %s' % mstr)
  2197. else:
  2198. raise TypeError('2nd argument metric must be a string identifier '
  2199. 'or a function.')
  2200. return dm