1
- __precompile__ ()
2
-
3
-
4
1
module Loess
5
2
6
3
import Distances. euclidean
@@ -38,9 +35,11 @@ Returns:
38
35
39
36
"""
40
37
function loess (xs:: AbstractMatrix{T} , ys:: AbstractVector{T} ;
41
- normalize:: Bool = true , span:: T = 0.75 , degree:: Int = 2 ) where T <: AbstractFloat
38
+ normalize:: Bool = true ,
39
+ span:: AbstractFloat = 0.75 ,
40
+ degree:: Integer = 2 ) where T<: AbstractFloat
42
41
if size (xs, 1 ) != size (ys, 1 )
43
- error ( " Predictor and response arrays must of the same length" )
42
+ throw ( DimensionMismatch ( " Predictor and response arrays must of the same length" ) )
44
43
end
45
44
46
45
n, m = size (xs)
@@ -50,7 +49,7 @@ function loess(xs::AbstractMatrix{T}, ys::AbstractVector{T};
50
49
# correctly apply predict to unnormalized data. We should have a normalize
51
50
# function that just returns a vector of scaling factors.
52
51
if normalize && m > 1
53
- xs = tnormalize! (copy (xs))
52
+ xs = tnormalize! (copy (xs))
54
53
end
55
54
56
55
kdtree = KDTree (xs, 0.05 * span)
@@ -59,7 +58,7 @@ function loess(xs::AbstractMatrix{T}, ys::AbstractVector{T};
59
58
# map verticies to their index in the bs coefficient matrix
60
59
verts = Dict {Vector{T}, Int} ()
61
60
for (k, vert) in enumerate (kdtree. verts)
62
- verts[vert] = k
61
+ verts[vert] = k
63
62
end
64
63
65
64
# Fit each vertex
@@ -72,44 +71,46 @@ function loess(xs::AbstractMatrix{T}, ys::AbstractVector{T};
72
71
vs = Array {T} (undef, q)
73
72
for (vert, k) in verts
74
73
# reset perm
75
- for i in 1 : n
76
- perm[i] = i
77
- end
74
+ for i in 1 : n
75
+ perm[i] = i
76
+ end
78
77
79
78
# distance to each point
80
- for i in 1 : n
81
- ds[i] = euclidean (vec (vert), vec (xs[i,:]))
82
- end
79
+ for i in 1 : n
80
+ ds[i] = euclidean (vec (vert), vec (xs[i,:]))
81
+ end
83
82
84
- # copy the q nearest points to vert into X
85
- partialsort! (perm, q, by= i -> ds[i])
86
- dmax = maximum ([ds[perm[i]] for i = 1 : q])
83
+ # copy the q nearest points to vert into X
84
+ partialsort! (perm, q, by= i -> ds[i])
85
+ dmax = maximum ([ds[perm[i]] for i = 1 : q])
87
86
88
- for i in 1 : q
87
+ for i in 1 : q
89
88
pi = perm[i]
90
- w = tricubic (ds[pi ] / dmax)
91
- us[i,1 ] = w
92
- for j in 1 : m
93
- x = xs[pi , j]
94
- wxl = w
95
- for l in 1 : degree
89
+ w = tricubic (ds[pi ] / dmax)
90
+ us[i,1 ] = w
91
+ for j in 1 : m
92
+ x = xs[pi , j]
93
+ wxl = w
94
+ for l in 1 : degree
96
95
wxl *= x
97
- us[i, 1 + (j- 1 )* degree + l] = wxl # w*x^l
98
- end
99
- end
100
- vs[i] = ys[pi ] * w
101
- end
102
- bs[k,:] = us \ vs
96
+ us[i, 1 + (j- 1 )* degree + l] = wxl # w*x^l
97
+ end
98
+ end
99
+ vs[i] = ys[pi ] * w
100
+ end
101
+ bs[k,:] = us \ vs
103
102
end
104
103
105
104
LoessModel {T} (xs, ys, bs, verts, kdtree)
106
105
end
107
106
108
# Univariate convenience method: a vector of predictors is treated as an
# n×1 design matrix and forwarded to the matrix method unchanged.
function loess(xs::AbstractVector{T}, ys::AbstractVector{T}; kwargs...) where {T<:AbstractFloat}
    design = reshape(xs, (length(xs), 1))
    return loess(design, ys; kwargs...)
end
112
109
110
# Catch-all promotion method: coerce mixed / non-float element types to a
# common floating-point type, then dispatch to the homogeneous methods.
function loess(xs::AbstractArray{T,N}, ys::AbstractVector{S}; kwargs...) where {T,N,S}
    # float(promote_type(...)) maps e.g. (Int, Float32) -> Float32, (Int, Int) -> Float64.
    F = float(promote_type(T, S))
    xsF = convert(AbstractArray{F,N}, xs)
    ysF = convert(AbstractVector{F}, ys)
    return loess(xsF, ysF; kwargs...)
end
113
114
114
115
115
116
# Predict response values from a trained loess model and predictor observations.
126
127
# A length n' vector of predicted response values.
127
128
#
128
129
# Scalar convenience method: wrap the single predictor value in a
# one-element vector and defer to the vector `predict` method.
predict(model::LoessModel{T}, z::T) where {T<:AbstractFloat} = predict(model, T[z])
131
132
132
133
@@ -136,40 +137,40 @@ function predict(model::LoessModel{T}, zs::AbstractVector{T}) where T <: Abstrac
136
137
# in the univariate case, interpret a non-singleton zs as vector of
137
138
# ponits, not one point
138
139
if m == 1 && length (zs) > 1
139
- return predict (model, reshape (zs, (length (zs), 1 )))
140
+ return predict (model, reshape (zs, (length (zs), 1 )))
140
141
end
141
142
142
143
if length (zs) != m
143
- error (" $(m) -dimensional model applied to length $(length (zs)) vector" )
144
+ error (" $(m) -dimensional model applied to length $(length (zs)) vector" )
144
145
end
145
146
146
147
adjacent_verts = traverse (model. kdtree, zs)
147
148
148
149
if m == 1
149
- @assert (length (adjacent_verts) == 2 )
150
- z = zs[1 ]
151
- u = (z - adjacent_verts[1 ][1 ]) /
152
- (adjacent_verts[2 ][1 ] - adjacent_verts[1 ][1 ])
153
-
154
- y1 = evalpoly (zs, model. bs[model. verts[[adjacent_verts[1 ][1 ]]],:])
155
- y2 = evalpoly (zs, model. bs[model. verts[[adjacent_verts[2 ][1 ]]],:])
156
- return (1.0 - u) * y1 + u * y2
150
+ @assert (length (adjacent_verts) == 2 )
151
+ z = zs[1 ]
152
+ u = (z - adjacent_verts[1 ][1 ]) /
153
+ (adjacent_verts[2 ][1 ] - adjacent_verts[1 ][1 ])
154
+
155
+ y1 = evalpoly (zs, model. bs[model. verts[[adjacent_verts[1 ][1 ]]],:])
156
+ y2 = evalpoly (zs, model. bs[model. verts[[adjacent_verts[2 ][1 ]]],:])
157
+ return (1.0 - u) * y1 + u * y2
157
158
else
158
- error (" Multivariate blending not yet implemented" )
159
- # TODO :
160
- # 1. Univariate linear interpolation between adjacent verticies.
161
- # 2. Blend these estimates. (I'm not sure how this is done.)
159
+ error (" Multivariate blending not yet implemented" )
160
+ # TODO :
161
+ # 1. Univariate linear interpolation between adjacent verticies.
162
+ # 2. Blend these estimates. (I'm not sure how this is done.)
162
163
end
163
164
end
164
165
165
166
166
167
"""
    predict(model::LoessModel{T}, zs::AbstractMatrix{T})

Evaluate the fitted loess model at each observation (row) of `zs`.

Returns a length `size(zs, 1)` vector of predicted responses, one per row.
"""
function predict(model::LoessModel{T}, zs::AbstractMatrix{T}) where T <: AbstractFloat
    npoints = size(zs, 1)
    ys = Array{T}(undef, npoints)
    for i in 1:npoints
        # zs[i, :] already yields a Vector on all supported Julia versions,
        # so the historical `vec(...)` wrapper (needed pre-0.5) is dropped.
        ys[i] = predict(model, zs[i, :])
    end
    return ys
end
174
175
175
176
"""
@@ -201,13 +202,13 @@ function evalpoly(xs, bs)
201
202
degree = div (length (bs) - 1 , m)
202
203
y = bs[1 ]
203
204
for i in 1 : m
204
- x = xs[i]
205
+ x = xs[i]
205
206
xx = x
206
207
y += xx * bs[1 + (i- 1 )* degree + 1 ]
207
- for l in 2 : degree
208
+ for l in 2 : degree
208
209
xx *= x
209
- y += xx * bs[1 + (i- 1 )* degree + l]
210
- end
210
+ y += xx * bs[1 + (i- 1 )* degree + l]
211
+ end
211
212
end
212
213
y
213
214
end
@@ -230,8 +231,8 @@ function tnormalize!(xs::AbstractMatrix{T}, q::T=0.1) where T <: AbstractFloat
230
231
n, m = size (xs)
231
232
cut = ceil (Int, (q * n))
232
233
for j in 1 : m
233
- tmp = sort! (xs[:,j])
234
- xs[:,j] ./= mean (tmp[cut+ 1 : n- cut])
234
+ tmp = sort! (xs[:,j])
235
+ xs[:,j] ./= mean (tmp[cut+ 1 : n- cut])
235
236
end
236
237
xs
237
238
end
0 commit comments