From 14cff54f648b75be034e186faad735291a6bed2d Mon Sep 17 00:00:00 2001 From: Tony Kelman <tony@kelman.net> Date: Sat, 8 Mar 2014 12:29:15 -0800 Subject: [PATCH 01/27] use LIBUV_INC so that USE_SYSTEM_LIBUV works --- src/support/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/support/Makefile b/src/support/Makefile index 9eba3bdaadfbe..d284a26098b93 100644 --- a/src/support/Makefile +++ b/src/support/Makefile @@ -35,7 +35,7 @@ SHIPFLAGS += $(FLAGS) default: release -HEADERS = $(wildcard *.h) $(JULIAHOME)/deps/libuv/include/uv.h +HEADERS = $(wildcard *.h) $(LIBUV_INC)/uv.h %.o: %.c $(HEADERS) @$(call PRINT_CC, $(CC) $(CPPFLAGS) $(SHIPFLAGS) -DNDEBUG -c $< -o $@) From 1a28512e1b6cd8aeb97b0de61756b860f36c57f2 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Sat, 8 Mar 2014 18:33:58 -0500 Subject: [PATCH 02/27] make ranges and arrays unequal, faster hashing of ranges, fixes #5778 fix == and isequal of ranges to only compare by components when it is safe to do so (ranges with different types but == components can generate different elements) add float32(x) etc. for FloatRange don't show floating point Range objects using colons, since those construct FloatRanges --- NEWS.md | 11 ++++++ base/abstractarray.jl | 7 ++++ base/bitarray.jl | 10 ----- base/range.jl | 33 +++++++++++++++- test/arrayops.jl | 22 +++++------ test/broadcast.jl | 2 +- test/ranges.jl | 88 +++++++++++++++++++++++++++---------------- 7 files changed, 116 insertions(+), 57 deletions(-) diff --git a/NEWS.md b/NEWS.md index c6462b408de58..bf23cdf871472 100644 --- a/NEWS.md +++ b/NEWS.md @@ -210,6 +210,9 @@ Library improvements * Constructors for collections (`Set`, `Dict`, etc.) now generally accept a single iterable argument giving the elements of the collection ([#4996], [#4871]) + * Ranges and arrays with the same elements are now unequal. This allows hashing + and comparing ranges to be faster. 
([#5778]) + Deprecated or removed --------------------- @@ -303,6 +306,14 @@ Deprecated or removed [#2333]: https://github.com/JuliaLang/julia/issues/2333 [#5636]: https://github.com/JuliaLang/julia/issues/5636 [#1268]: https://github.com/JuliaLang/julia/issues/1268 +[#5677]: https://github.com/JuliaLang/julia/issues/5677 +[#5545]: https://github.com/JuliaLang/julia/issues/5545 +[#6057]: https://github.com/JuliaLang/julia/issues/6057 +[#6056]: https://github.com/JuliaLang/julia/issues/6056 +[#3344]: https://github.com/JuliaLang/julia/issues/3344 +[#5737]: https://github.com/JuliaLang/julia/issues/5737 +[#6073]: https://github.com/JuliaLang/julia/issues/6073 +[#5778]: https://github.com/JuliaLang/julia/issues/5778 Julia v0.2.0 Release Notes ========================== diff --git a/base/abstractarray.jl b/base/abstractarray.jl index 49e3b764e4502..6d51d0429731f 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -306,6 +306,7 @@ for fn in _numeric_conversion_func_names @eval begin $fn(r::Range ) = Range($fn(r.start), $fn(r.step), r.len) $fn(r::Range1) = Range1($fn(r.start), r.len) + $fn(r::FloatRange) = FloatRange($fn(r.start), $fn(r.step), r.len, $fn(r.divisor)) end end @@ -814,6 +815,9 @@ function isequal(A::AbstractArray, B::AbstractArray) if size(A) != size(B) return false end + if isa(A,Ranges) != isa(B,Ranges) + return false + end for i = 1:length(A) if !isequal(A[i], B[i]) return false @@ -835,6 +839,9 @@ function (==)(A::AbstractArray, B::AbstractArray) if size(A) != size(B) return false end + if isa(A,Ranges) != isa(B,Ranges) + return false + end for i = 1:length(A) if !(A[i]==B[i]) return false diff --git a/base/bitarray.jl b/base/bitarray.jl index 1cc388d8c8005..5d193ed8d2ae0 100644 --- a/base/bitarray.jl +++ b/base/bitarray.jl @@ -1226,16 +1226,6 @@ function (!=)(A::BitArray, B::BitArray) return A.chunks != B.chunks end -# TODO: avoid bitpack/bitunpack -for f in (:(==), :!=) - @eval begin - ($f)(A::BitArray, B::AbstractArray{Bool}) = ($f)(A, bitpack(B)) - ($f)(A::AbstractArray{Bool}, B::BitArray) = ($f)(bitpack(A), B) - ($f)(A::BitArray, B::AbstractArray) = ($f)(bitunpack(A), B) - ($f)(A::AbstractArray, B::BitArray) = ($f)(A, bitunpack(B)) - end -end - ## Data movement ## diff --git a/base/range.jl b/base/range.jl index 5230331889472..6f77542f574b8 100644 --- a/base/range.jl +++ b/base/range.jl @@ -222,6 +222,8 @@ function show(io::IO, r::Ranges) end show(io::IO, r::Range1) = print(io, repr(first(r)), ':', repr(last(r))) +show{T<:FloatingPoint}(io::IO, r::Range{T}) = invoke(show, (IO,Any), io, r) + start(r::Ranges) = 0 next{T}(r::Range{T}, i) = (oftype(T, r.start + i*step(r)), i+1) next{T}(r::Range1{T}, i) = (oftype(T, r.start + i), i+1) @@ -234,8 +236,35 @@ start{T<:Integer}(r::Range1{T}) = r.start next{T<:Integer}(r::Range1{T}, i) = (i, oftype(T, i+1)) done{T<:Integer}(r::Range1{T}, i) = i==oftype(T, r.start+r.len) -==(r::Ranges, s::Ranges) = (first(r)==first(s)) & (step(r)==step(s)) & (length(r)==length(s)) -==(r::Range1, s::Range1) = (r.start==s.start) & (r.len==s.len) +isequal{T<:Ranges}(r::T, s::T) = + (first(r)==first(s)) & (step(r)==step(s)) & (length(r)==length(s)) + +isequal(r::Ranges, s::Ranges) = false + +=={T<:Ranges}(r::T, s::T) = isequal(r, s) + +=={T<:Integer, S<:Integer}(r::Ranges{T}, s::Ranges{S}) = + (first(r)==first(s)) & (step(r)==step(s)) & (length(r)==length(s)) + +function ==(r::Ranges, s::Ranges) + lr = length(r) + if lr != length(s) + return false + end + u, v = start(r), start(s) + while !done(r, 
u) + x, u = next(r, u) + y, v = next(s, v) + if x != y + return false + end + end + return true +end + +# hashing ranges by component at worst leads to collisions for very similar ranges +hash(r::Ranges) = + bitmix(hash(first(r)), bitmix(hash(step(r)), bitmix(hash(length(r)), uint(0xaaeeaaee)))) # TODO: isless? diff --git a/test/arrayops.jl b/test/arrayops.jl index 29d370a7ad3e3..da711bebab8be 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -99,10 +99,10 @@ A = reshape(1:120, 3, 5, 8) sA = sub(A, 2, 1:5, :) @test parent(sA) == A @test parentindexes(sA) == (2:2, 1:5, 1:8) -@test Base.parentdims(sA) == 1:3 +@test Base.parentdims(sA) == [1:3] @test size(sA) == (1, 5, 8) @test_throws sA[2, 1:8] -@test sA[1, 2, 1:8][:] == 5:15:120 +@test sA[1, 2, 1:8][:] == [5:15:120] sA[2:5:end] = -1 @test all(sA[2:5:end] .== -1) @test all(A[5:15:120] .== -1) @@ -110,19 +110,19 @@ sA[2:5:end] = -1 @test stride(sA,3) == 15 @test stride(sA,4) == 120 sA = sub(A, 1:3, 1:5, 5) -@test Base.parentdims(sA) == 1:2 +@test Base.parentdims(sA) == [1:2] sA[1:3,1:5] = -2 @test all(A[:,:,5] .== -2) sA[:] = -3 @test all(A[:,:,5] .== -3) @test strides(sA) == (1,3) sA = sub(A, 1:3, 3, 2:5) -@test Base.parentdims(sA) == 1:3 +@test Base.parentdims(sA) == [1:3] @test size(sA) == (3,1,4) @test sA == A[1:3,3,2:5] @test sA[:] == A[1:3,3,2:5][:] sA = sub(A, 1:2:3, 1:3:5, 1:2:8) -@test Base.parentdims(sA) == 1:3 +@test Base.parentdims(sA) == [1:3] @test strides(sA) == (2,9,30) @test sA[:] == A[1:2:3, 1:3:5, 1:2:8][:] @@ -138,17 +138,17 @@ A = reshape(1:120, 3, 5, 8) sA = slice(A, 2, :, 1:8) @test parent(sA) == A @test parentindexes(sA) == (2, 1:5, 1:8) -@test Base.parentdims(sA) == 2:3 +@test Base.parentdims(sA) == [2:3] @test size(sA) == (5, 8) @test strides(sA) == (3,15) -@test sA[2, 1:8][:] == 5:15:120 -@test sA[:,1] == 2:3:14 -@test sA[2:5:end] == 5:15:110 +@test sA[2, 1:8][:] == [5:15:120] +@test sA[:,1] == [2:3:14] +@test sA[2:5:end] == [5:15:110] sA[2:5:end] = -1 @test all(sA[2:5:end] .== -1) @test all(A[5:15:120] .== -1) sA = slice(A, 1:3, 1:5, 5) -@test Base.parentdims(sA) == 1:2 +@test Base.parentdims(sA) == [1:2] @test size(sA) == (3,5) @test strides(sA) == (1,3) sA = slice(A, 1:2:3, 3, 1:2:8) @@ -198,7 +198,7 @@ let X = get(A, -5:5, nan(Float32)) @test eltype(X) == Float32 @test isnan(X) == [trues(6),falses(5)] - @test X[7:11] == 1:5 + @test X[7:11] == [1:5] X = get(A, (2:4, 9:-2:-13), 0) Xv = zeros(Int, 3, 12) Xv[1:2, 2:5] = A[2:3, 7:-2:1] diff --git a/test/broadcast.jl b/test/broadcast.jl index 9752a6e444a31..7e43596ad8d09 100644 --- a/test/broadcast.jl +++ b/test/broadcast.jl @@ -62,7 +62,7 @@ end r1 = 1:1 r2 = 1:5 ratio = [1,1/2,1/3,1/4,1/5] -@test r1.*r2 == 1:5 +@test r1.*r2 == [1:5] @test r1./r2 == ratio m = [1:2]' @test m.*r2 == [1:5 2:2:10] diff --git a/test/ranges.jl b/test/ranges.jl index 1870f4e8fe4e3..c897f1229a4bb 100644 --- a/test/ranges.jl +++ b/test/ranges.jl @@ -168,39 +168,39 @@ end # tricky floating-point ranges -@test 0.1:0.1:0.3 == [1:3]./10 -@test 0.0:0.1:0.3 == [0:3]./10 -@test 0.3:-0.1:-0.1 == [3:-1:-1]./10 -@test 0.1:-0.1:-0.3 == [1:-1:-3]./10 -@test 0.0:0.1:1.0 == [0:10]./10 -@test 0.0:-0.1:1.0 == [] -@test 0.0:0.1:-1.0 == [] -@test 0.0:-0.1:-1.0 == [0:-1:-10]./10 -@test 1.0:1/49:27.0 == [49:1323]./49 -@test 0.0:0.7:2.1 == [0:7:21]./10 -@test 0.0:1.1:3.3 == [0:11:33]./10 -@test 0.1:1.1:3.4 == [1:11:34]./10 -@test 0.0:1.3:3.9 == [0:13:39]./10 -@test 0.1:1.3:4.0 == [1:13:40]./10 -@test 1.1:1.1:3.3 == [11:11:33]./10 -@test 0.3:0.1:1.1 == [3:1:11]./10 - -@test 0.0:1.0:5.5 == [0:10:55]./10 
-@test 0.0:-1.0:0.5 == [] -@test 0.0:1.0:0.5 == [0.0] - -@test prevfloat(0.1):0.1:0.3 == [prevfloat(0.1), 0.2, 0.3] -@test nextfloat(0.1):0.1:0.3 == [nextfloat(0.1), 0.2] -@test prevfloat(0.0):0.1:0.3 == [prevfloat(0.0), 0.1, 0.2] -@test nextfloat(0.0):0.1:0.3 == [nextfloat(0.0), 0.1, 0.2] -@test 0.1:0.1:prevfloat(0.3) == [0.1, 0.2] -@test 0.1:0.1:nextfloat(0.3) == [0.1, 0.2, nextfloat(0.3)] -@test 0.0:0.1:prevfloat(0.3) == [0.0, 0.1, 0.2] -@test 0.0:0.1:nextfloat(0.3) == [0.0, 0.1, 0.2, nextfloat(0.3)] -@test 0.1:prevfloat(0.1):0.3 == [0.1, 0.2, 0.3] -@test 0.1:nextfloat(0.1):0.3 == [0.1, 0.2] -@test 0.0:prevfloat(0.1):0.3 == [0.0, prevfloat(0.1), prevfloat(0.2), 0.3] -@test 0.0:nextfloat(0.1):0.3 == [0.0, nextfloat(0.1), nextfloat(0.2)] +@test [0.1:0.1:0.3] == [1:3]./10 +@test [0.0:0.1:0.3] == [0:3]./10 +@test [0.3:-0.1:-0.1] == [3:-1:-1]./10 +@test [0.1:-0.1:-0.3] == [1:-1:-3]./10 +@test [0.0:0.1:1.0] == [0:10]./10 +@test [0.0:-0.1:1.0] == [] +@test [0.0:0.1:-1.0] == [] +@test [0.0:-0.1:-1.0] == [0:-1:-10]./10 +@test [1.0:1/49:27.0] == [49:1323]./49 +@test [0.0:0.7:2.1] == [0:7:21]./10 +@test [0.0:1.1:3.3] == [0:11:33]./10 +@test [0.1:1.1:3.4] == [1:11:34]./10 +@test [0.0:1.3:3.9] == [0:13:39]./10 +@test [0.1:1.3:4.0] == [1:13:40]./10 +@test [1.1:1.1:3.3] == [11:11:33]./10 +@test [0.3:0.1:1.1] == [3:1:11]./10 + +@test [0.0:1.0:5.5] == [0:10:55]./10 +@test [0.0:-1.0:0.5] == [] +@test [0.0:1.0:0.5] == [0.0] + +@test [prevfloat(0.1):0.1:0.3] == [prevfloat(0.1), 0.2, 0.3] +@test [nextfloat(0.1):0.1:0.3] == [nextfloat(0.1), 0.2] +@test [prevfloat(0.0):0.1:0.3] == [prevfloat(0.0), 0.1, 0.2] +@test [nextfloat(0.0):0.1:0.3] == [nextfloat(0.0), 0.1, 0.2] +@test [0.1:0.1:prevfloat(0.3)] == [0.1, 0.2] +@test [0.1:0.1:nextfloat(0.3)] == [0.1, 0.2, nextfloat(0.3)] +@test [0.0:0.1:prevfloat(0.3)] == [0.0, 0.1, 0.2] +@test [0.0:0.1:nextfloat(0.3)] == [0.0, 0.1, 0.2, nextfloat(0.3)] +@test [0.1:prevfloat(0.1):0.3] == [0.1, 0.2, 0.3] +@test [0.1:nextfloat(0.1):0.3] == [0.1, 0.2] +@test [0.0:prevfloat(0.1):0.3] == [0.0, prevfloat(0.1), prevfloat(0.2), 0.3] +@test [0.0:nextfloat(0.1):0.3] == [0.0, nextfloat(0.1), nextfloat(0.2)] for T = (Float32, Float64,),# BigFloat), a = -5:25, s = [-5:-1;1:25], d = 1:25, n = -1:15 @@ -210,3 +210,25 @@ for T = (Float32, Float64,),# BigFloat), stop = convert(T,(a+(n-1)*s))/den @test [start:step:stop] == T[a:s:a+(n-1)*s]./den end + +# near-equal ranges +@test 0.0:0.1:1.0 != Range(0.0,0.1,11) + +# comparing and hashing ranges +let + Rs = {1:2, int32(1:3:17), int64(1:3:17), 1:0, 17:-3:0, + 0.0:0.1:1.0, Range(0.0,0.1,11), + float32(0.0:0.1:1.0), float32(Range(0.0,0.1,11))} + for r in Rs + ar = collect(r) + @test r != ar + @test !isequal(r, ar) + for s in Rs + as = collect(s) + + @test !isequal(r, s) || hash(r)==hash(s) + + @test (r==s) == (ar==as) + end + end +end From 7f18a51a4da248dd7028f1090b3c01bad4e95efb Mon Sep 17 00:00:00 2001 From: Andreas Noack Jensen <andreasnoackjensen@gmail.com> Date: Mon, 10 Mar 2014 07:21:32 +0100 Subject: [PATCH 03/27] =?UTF-8?q?Fix=20A=5Fmul=5FB!=20for=20y=20inputs=20w?= =?UTF-8?q?ith=20NaNs=20by=20setting=20elements=20to=20zero=20when=20?= =?UTF-8?q?=CE=B2=20is=20zero.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- base/linalg/sparse.jl | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/base/linalg/sparse.jl b/base/linalg/sparse.jl index 3ce8f4b7837b2..ea5af46aba3d6 100644 --- a/base/linalg/sparse.jl +++ b/base/linalg/sparse.jl @@ -13,7 +13,9 @@ end 
function A_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractVector, β::Number, y::AbstractVector) A.n == length(x) || throw(DimensionMismatch("")) A.m == length(y) || throw(DimensionMismatch("")) - for i = 1:A.m; y[i] *= β; end + if β != 1 + β != 0 ? scale!(y,β) : fill!(y,zero(eltype(y))) + end nzv = A.nzval rv = A.rowval for col = 1 : A.n @@ -28,7 +30,7 @@ A_mul_B!(y::AbstractVector, A::SparseMatrixCSC, x::AbstractVector) = A_mul_B!(on function *{TA,S,Tx}(A::SparseMatrixCSC{TA,S}, x::AbstractVector{Tx}) T = promote_type(TA,Tx) - A_mul_B!(one(T), A, x, zero(T), zeros(T, A.m)) + A_mul_B!(one(T), A, x, zero(T), Array(T, A.m)) end function Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractVector, β::Number, y::AbstractVector) @@ -36,9 +38,16 @@ function Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractVector, β::Number A.m == length(x) || throw(DimensionMismatch("")) nzv = A.nzval rv = A.rowval + zro = zero(eltype(y)) @inbounds begin for i = 1 : A.n - y[i] *= β + if β != 1 + if β != 0 + y[i] *= β + else + y[i] = zro + end + end tmp = zero(eltype(y)) for j = A.colptr[i] : (A.colptr[i+1]-1) tmp += conj(nzv[j])*x[rv[j]] @@ -54,9 +63,16 @@ function At_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractVector, β::Number A.m == length(x) || throw(DimensionMismatch("")) nzv = A.nzval rv = A.rowval + zro = zero(eltype(y)) @inbounds begin for i = 1 : A.n - y[i] *= β + if β != 1 + if β != 0 + y[i] *= β + else + y[i] = zro + end + end tmp = zero(eltype(y)) for j = A.colptr[i] : (A.colptr[i+1]-1) tmp += nzv[j]*x[rv[j]] @@ -69,11 +85,11 @@ end At_mul_B!(y::AbstractVector, A::SparseMatrixCSC, x::AbstractVector) = At_mul_B!(one(eltype(x)), A, x, zero(eltype(y)), y) function Ac_mul_B{TA,S,Tx}(A::SparseMatrixCSC{TA,S}, x::AbstractVector{Tx}) T = promote_type(TA, Tx) - Ac_mul_B!(one(T), A, x, zero(T), zeros(T, A.n)) + Ac_mul_B!(one(T), A, x, zero(T), Array(T, A.n)) end function At_mul_B{TA,S,Tx}(A::SparseMatrixCSC{TA,S}, x::AbstractVector{Tx}) T = promote_type(TA, Tx) - At_mul_B!(one(T), A, x, zero(T), zeros(T, A.n)) + At_mul_B!(one(T), A, x, zero(T), Array(T, A.n)) end *(X::BitArray{1}, A::SparseMatrixCSC) = invoke(*, (AbstractVector, SparseMatrixCSC), X, A) From 8fde67c1993fc639e0034c39d3356c3e5ded7379 Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" <stevenj@alum.mit.edu> Date: Mon, 10 Mar 2014 10:38:46 -0400 Subject: [PATCH 04/27] grammar --- NEWS.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/NEWS.md b/NEWS.md index 9ce07ade9fdd2..1c145b885aa51 100644 --- a/NEWS.md +++ b/NEWS.md @@ -156,9 +156,9 @@ Library improvements the same length. This generalizes and replaces `normfro` ([#6057]), and `norm` is now type-stable ([#6056]). - * + and - now only works when the sizes of the arrays are the same, i.e. the - operations no longer do broadcasting. New `UniformScaling` type and identity - `I` constant (#5810). + * `+` and `-` now require the sizes of the arrays to be the + same: the operations no longer do broadcasting. New + `UniformScaling` matrix type and identity `I` constant (#5810). 
* Sparse linear algebra From b42587036bd076da83d9b7db8278e813d1e1afdc Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Mon, 10 Mar 2014 13:27:21 -0400 Subject: [PATCH 05/27] fix #6098, crash on stack overflow splicing big argument lists still slow as hell and still a bad idea, but at least it doesn't segfault --- src/builtins.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/src/builtins.c b/src/builtins.c index 7082245f02b96..4e62a4f356f7f 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -266,6 +266,9 @@ JL_CALLABLE(jl_f_apply) jl_tuple_len(args[1])); } } + jl_value_t *argarr = NULL; + JL_GC_PUSH1(&argarr); + jl_value_t *result; jl_value_t **newargs; size_t n=0, i, j; for(i=1; i < nargs; i++) { @@ -285,15 +288,27 @@ JL_CALLABLE(jl_f_apply) JL_TYPECHK(apply, tuple, args[i]); } } - goto fancy_apply; + argarr = jl_apply(jl_append_any_func, &args[1], nargs-1); + assert(jl_typeis(argarr, jl_array_any_type)); + result = jl_apply((jl_function_t*)args[0], jl_cell_data(argarr), jl_array_len(argarr)); + JL_GC_POP(); + return result; } } - newargs = (jl_value_t**)alloca(n * sizeof(jl_value_t*)); + if (n > 64000) { + // put arguments on the heap if there are too many + argarr = (jl_value_t*)jl_alloc_cell_1d(n); + newargs = jl_cell_data(argarr); + } + else { + newargs = (jl_value_t**)alloca(n * sizeof(jl_value_t*)); + } n = 0; for(i=1; i < nargs; i++) { if (jl_is_tuple(args[i])) { jl_tuple_t *t = (jl_tuple_t*)args[i]; - for(j=0; j < jl_tuple_len(t); j++) + size_t al = jl_tuple_len(t); + for(j=0; j < al; j++) newargs[n++] = jl_tupleref(t, j); } else { @@ -302,14 +317,7 @@ JL_CALLABLE(jl_f_apply) newargs[n++] = jl_cellref(args[i], j); } } - return jl_apply((jl_function_t*)args[0], newargs, n); - - fancy_apply: ; - jl_value_t *argarr = jl_apply(jl_append_any_func, &args[1], nargs-1); - JL_GC_PUSH1(&argarr); - assert(jl_typeis(argarr, jl_array_any_type)); - jl_value_t *result = jl_apply((jl_function_t*)args[0], - jl_cell_data(argarr), jl_array_len(argarr)); + result = jl_apply((jl_function_t*)args[0], newargs, n); JL_GC_POP(); return result; } From 493432999562b75199e2e1cc52a0c6735e75ca3f Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Mon, 10 Mar 2014 13:34:30 -0400 Subject: [PATCH 06/27] fix #6099, small oversight that left out .+= and .-= --- src/julia-parser.scm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/julia-parser.scm b/src/julia-parser.scm index b836b3c7f024e..e276d1a8567d5 100644 --- a/src/julia-parser.scm +++ b/src/julia-parser.scm @@ -83,7 +83,7 @@ ; operators that are special forms, not function names (define syntactic-operators '(= := += -= *= /= //= .//= .*= ./= |\\=| |.\\=| ^= .^= %= .%= |\|=| &= $= => - <<= >>= >>>= -> --> |\|\|| && |::| |.| ...)) + <<= >>= >>>= -> --> |\|\|| && |::| |.| ... 
|.+=| |.-=|)) (define syntactic-unary-operators '($ &)) (define reserved-words '(begin while if for try return break continue From c4724779df059a8965aa76c197d952b7b13ecaa6 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Mon, 10 Mar 2014 13:56:34 -0400 Subject: [PATCH 07/27] fix #6091, message for non-finite or length mismatch in test_approx_eq --- base/test.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/base/test.jl b/base/test.jl index a4e6581e33fcb..0e96e28c7732f 100644 --- a/base/test.jl +++ b/base/test.jl @@ -68,18 +68,24 @@ approx_full(x) = full(x) function test_approx_eq(va, vb, Eps, astr, bstr) va = approx_full(va) vb = approx_full(vb) + if length(va) != length(vb) + error("lengths of ", astr, " and ", bstr, " do not match: ", + "\n ", astr, " (length $(length(va))) = ", va, + "\n ", bstr, " (length $(length(vb))) = ", vb) + end diff = real(zero(eltype(va))) - ok = true for i = 1:length(va) xa = va[i]; xb = vb[i] if isfinite(xa) && isfinite(xb) diff = max(diff, abs(xa-xb)) elseif !isequal(xa,xb) - ok = false; break + error("mismatch of non-finite elements: ", + "\n ", astr, " = ", va, + "\n ", bstr, " = ", vb) end end - if !ok || (!isnan(Eps) && !(diff <= Eps)) + if !isnan(Eps) && !(diff <= Eps) sdiff = string("|", astr, " - ", bstr, "| <= ", Eps) error("assertion failed: ", sdiff, "\n ", astr, " = ", va, From d2d7c3217f52c72ac3eb06ccc7da0dfc9fd573c3 Mon Sep 17 00:00:00 2001 From: Ivar Nesje <ivarne@gmail.com> Date: Mon, 10 Mar 2014 19:48:13 +0100 Subject: [PATCH 08/27] Which for error message for non-broadcasting (+) and (-) --- base/deprecated.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/base/deprecated.jl b/base/deprecated.jl index a90e506ec3971..a5c3029f38291 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -189,6 +189,8 @@ export PipeString @deprecate svdfact(A,thin) svdfact(A,thin=thin) @deprecate svdfact!(A,thin) svdfact(A,thin=thin) @deprecate svd(A,thin) svd(A,thin=thin) +# Note: These methods need a more helpfull error message than a `NoMethodError`, +# when the deprecation is removed @deprecate (+)(A::Array{Bool},x::Bool) A .+ x @deprecate (+)(x::Bool,A::Array{Bool}) x .+ A @deprecate (-)(A::Array{Bool},x::Bool) A .- x From 764098f251a8e6bdccaae42bd8386f596757d2ee Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Mon, 10 Mar 2014 14:59:24 -0400 Subject: [PATCH 09/27] mitigate #6097 the general problem was that one place in the code had a larger type for a tuple than another. I don't know why, but we can at least generate correct code instead of crashing. there was already some code in boxed() towards handling this kind of thing, but it needed to be rearranged a bit. --- src/cgutils.cpp | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 2f6d232da8fe6..a5389a0ac88d8 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -1457,9 +1457,19 @@ static jl_value_t *static_constant_instance(Constant *constant, jl_value_t *jt) static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) { Type *t = (v == NULL) ? NULL : v->getType(); + + if (jt == NULL) { + jt = julia_type_of(v); + } + else if (!jl_is_leaf_type(jt)) { + // we can get a sharper type from julia_type_of than expr_type in some + // cases, due to ccall's compile-time evaluations of types. 
see issue #5752 + jl_value_t *jt2 = julia_type_of(v); + if (jl_subtype(jt2, jt, 0)) + jt = jt2; + } + if (v == NULL || dyn_cast<UndefValue>(v) != 0 || t == NoopType) { - if (jt == NULL || jl_is_uniontype(jt) || jl_is_abstracttype(jt)) - jt = julia_type_of(v); jl_value_t *s = static_void_instance(jt); if (jl_is_tuple(jt) && jl_tuple_len(jt) > 0) jl_add_linfo_root(ctx->linfo, s); @@ -1468,8 +1478,6 @@ static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) if (t == jl_pvalue_llvmt) return v; if (t == T_int1) return julia_bool(v); - if (jt == NULL || jl_is_uniontype(jt) || jl_is_abstracttype(jt)) - jt = julia_type_of(v); if (t == T_void || t->isEmptyTy()) { jl_value_t *s = static_void_instance(jt); if (jl_is_tuple(jt) && jl_tuple_len(jt) > 0) @@ -1489,12 +1497,14 @@ static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) make_gcroot(tpl,ctx); for (size_t i = 0; i < n; ++i) { jl_value_t *jti = jl_tupleref(jt,i); - Value *vi = emit_tupleref(v,ConstantInt::get(T_size,i+1),jt,ctx); - emit_tupleset(tpl,ConstantInt::get(T_size,i+1),boxed(vi,ctx,jti),jt,ctx); + Value *vi = emit_tupleref(v, ConstantInt::get(T_size,i+1), jt, ctx); + Value *boxedvi = boxed(vi, ctx, jti); + emit_tupleset(tpl, ConstantInt::get(T_size,i+1), boxedvi, jt, ctx); } ctx->argDepth = last_depth; return tpl; } + jl_datatype_t *jb = (jl_datatype_t*)jt; assert(jl_is_datatype(jb)); if (jb == jl_int8_type) @@ -1522,18 +1532,11 @@ static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) if (jb == jl_uint64_type) return builder.CreateCall(prepare_call(box_uint64_func), v); if (jb == jl_char_type) return builder.CreateCall(prepare_call(box_char_func), v); - if (!jl_is_leaf_type(jt)) { - // we can get a sharper type from julia_type_of than expr_type in some - // cases, due to ccall's compile-time evaluations of types. see issue #5752 - jl_value_t *jt2 = julia_type_of(v); - if (jl_subtype(jt2, jt, 0)) - jt = jt2; - } - if (!jl_isbits(jt) || !jl_is_leaf_type(jt)) { assert("Don't know how to box this type" && false); return NULL; } + if (!jb->abstract && jb->size == 0) { if (jb->instance == NULL) jl_new_struct_uninit(jb); From f27e1549c92392939967c9c0b71e9194165382f6 Mon Sep 17 00:00:00 2001 From: "Steven G. 
Johnson" <stevenj@mit.edu> Date: Mon, 10 Mar 2014 16:26:58 -0400 Subject: [PATCH 10/27] type stability in all norm variants (continuing #6056 and #6057) --- base/linalg/cholmod.jl | 2 +- base/linalg/dense.jl | 8 ++------ base/linalg/generic.jl | 29 +++++++++++++++-------------- base/linalg/sparse.jl | 37 ++++++++++++++++++++----------------- 4 files changed, 38 insertions(+), 38 deletions(-) diff --git a/base/linalg/cholmod.jl b/base/linalg/cholmod.jl index 5dcc61dfeae1e..28768126f963a 100644 --- a/base/linalg/cholmod.jl +++ b/base/linalg/cholmod.jl @@ -782,7 +782,7 @@ for Ti in (:Int32,:Int64) ccall((@chm_nm "nnz" $Ti ,:libcholmod), Int, (Ptr{c_CholmodSparse{Tv,$Ti}},Ptr{Uint8}),&A.c,cmn($Ti)) end - function norm{Tv<:CHMVTypes}(A::CholmodSparse{Tv,$Ti},p::Number) + function norm{Tv<:CHMVTypes}(A::CholmodSparse{Tv,$Ti},p::Real) ccall((@chm_nm "norm_sparse" $Ti , :libcholmod), Float64, (Ptr{c_CholmodSparse{Tv,$Ti}}, Cint, Ptr{Uint8}), diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index 9639192e7b098..fdb7bec2b0840 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -18,12 +18,8 @@ function norm{T<:BlasFloat, TI<:Integer}(x::StridedVector{T}, rx::Union(Range1{T BLAS.nrm2(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx)) end -function vecnorm{T<:BlasFloat}(x::Union(Array{T},StridedVector{T}), p::Real=2) - length(x) == 0 && return zero(T) - p == 1 && T <: Real && return BLAS.asum(x) - p == 2 && return BLAS.nrm2(x) - invoke(vecnorm, (Any, Real), x, p) -end +vecnorm1{T<:BlasReal}(x::Union(Array{T},StridedVector{T})) = BLAS.asum(x) +vecnorm2{T<:BlasFloat}(x::Union(Array{T},StridedVector{T})) = BLAS.nrm2(x) function triu!{T}(M::Matrix{T}, k::Integer) m, n = size(M) diff --git a/base/linalg/generic.jl b/base/linalg/generic.jl index 9c7b6023daf79..1b2fbbe606f44 100644 --- a/base/linalg/generic.jl +++ b/base/linalg/generic.jl @@ -145,36 +145,40 @@ norm(x::AbstractVector, p::Real=2) = vecnorm(x, p) function norm1{T}(A::AbstractMatrix{T}) m,n = size(A) - nrm = zero(real(zero(T))) + Tnorm = typeof(float(real(zero(T)))) + Tsum = promote_type(Float64,Tnorm) + nrm::Tsum = 0 @inbounds begin for j = 1:n - nrmj = zero(real(zero(T))) + nrmj::Tsum = 0 for i = 1:m nrmj += abs(A[i,j]) end nrm = max(nrm,nrmj) end end - return nrm + return convert(Tnorm, nrm) end -function norm2(A::AbstractMatrix) +function norm2{T}(A::AbstractMatrix{T}) m,n = size(A) - if m == 0 || n == 0 return real(zero(eltype(A))) end - svdvals(A)[1] + Tnorm = typeof(float(real(zero(T)))) + (m == 0 || n == 0) ? zero(Tnorm) : convert(Tnorm, svdvals(A)[1]) end function normInf{T}(A::AbstractMatrix{T}) m,n = size(A) - nrm = zero(real(zero(T))) + Tnorm = typeof(float(real(zero(T)))) + Tsum = promote_type(Float64,Tnorm) + nrm::Tsum = 0 @inbounds begin for i = 1:m - nrmi = zero(real(zero(T))) + nrmi::Tsum = 0 for j = 1:n nrmi += abs(A[i,j]) end nrm = max(nrm,nrmi) end end - return nrm + return convert(Tnorm, nrm) end function norm{T}(A::AbstractMatrix{T}, p::Real=2) p == 2 && return norm2(A) @@ -183,11 +187,8 @@ function norm{T}(A::AbstractMatrix{T}, p::Real=2) throw(ArgumentError("invalid p-norm p=$p. Valid: 1, 2, Inf")) end -function norm(x::Number, p=2) - if p == 1 || p == Inf || p == -Inf return abs(x) end - p == 0 && return ifelse(x != 0, 1, 0) - float(abs(x)) -end +norm(x::Number, p::Real=2) = + p == 0 ? 
convert(typeof(real(x)), ifelse(x != 0, 1, 0)) : abs(x) rank(A::AbstractMatrix, tol::Real) = sum(svdvals(A) .> tol) function rank(A::AbstractMatrix) diff --git a/base/linalg/sparse.jl b/base/linalg/sparse.jl index 3ce8f4b7837b2..88cf3c876fa41 100644 --- a/base/linalg/sparse.jl +++ b/base/linalg/sparse.jl @@ -462,28 +462,31 @@ vecnorm(A::SparseMatrixCSC, p::Real=2) = vecnorm(A.nzval, p) function norm(A::SparseMatrixCSC,p::Real=1) m, n = size(A) if m == 0 || n == 0 || isempty(A) - return real(zero(eltype(A))) + return float(real(zero(eltype(A)))) elseif m == 1 || n == 1 return norm(reshape(full(A), length(A)), p) - elseif p==1 - nA = real(zero(eltype(A))) - for j=1:n - colSum = real(zero(eltype(A))) - for i = A.colptr[j]:A.colptr[j+1]-1 - colSum += abs(A.nzval[i]) + else + Tnorm = typeof(float(real(zero(eltype(A))))) + Tsum = promote_type(Float64,Tnorm) + if p==1 + nA::Tsum = 0 + for j=1:n + colSum::Tsum = 0 + for i = A.colptr[j]:A.colptr[j+1]-1 + colSum += abs(A.nzval[i]) + end + nA = max(nA, colSum) end - nA = max(nA, colSum) - end - elseif p==Inf - rowSum = zeros(typeof(real(A[1])),m) - for i=1:length(A.nzval) - rowSum[A.rowval[i]] += abs(A.nzval[i]) + return convert(Tnorm, nA) + elseif p==Inf + rowSum = zeros(Tsum,m) + for i=1:length(A.nzval) + rowSum[A.rowval[i]] += abs(A.nzval[i]) + end + return convert(Tnorm, maximum(rowSum)) end - nA = maximum(rowSum) - else - throw(ArgumentError("invalid p-norm p=$p. Valid: 1, Inf")) end - return nA + throw(ArgumentError("invalid p-norm p=$p. Valid: 1, Inf")) end # TODO From e2515bbf33ea1778a9fd8f6947806eae25e27b97 Mon Sep 17 00:00:00 2001 From: Jiahao Chen <jiahao@mit.edu> Date: Mon, 10 Mar 2014 17:13:54 -0400 Subject: [PATCH 11/27] Doc for lufact is corrected and updated. - lufact(::Tridiagonal) is now documented. - Tabulate the output types LU, LUTridiagonal and UmfpackLU types, their components, and supported methods. --- doc/stdlib/linalg.rst | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst index a74e0d2297bc1..6d5e2f9867d2c 100644 --- a/doc/stdlib/linalg.rst +++ b/doc/stdlib/linalg.rst @@ -43,9 +43,41 @@ Linear algebra functions in Julia are largely implemented by calling functions f Compute the LU factorization of ``A``, such that ``A[p,:] = L*U``. -.. function:: lufact(A) -> LU - - Compute the LU factorization of ``A``, returning an ``LU`` object for dense ``A`` or an ``UmfpackLU`` object for sparse ``A``. The individual components of the factorization ``F`` can be accesed by indexing: ``F[:L]``, ``F[:U]``, and ``F[:P]`` (permutation matrix) or ``F[:p]`` (permutation vector). An ``UmfpackLU`` object has additional components ``F[:q]`` (the left permutation vector) and ``Rs`` the vector of scaling factors. The following functions are available for both ``LU`` and ``UmfpackLU`` objects: ``size``, ``\`` and ``det``. For ``LU`` there is also an ``inv`` method. The sparse LU factorization is such that ``L*U`` is equal to``scale(Rs,A)[p,q]``. +.. function:: lufact(A) -> F + + Compute the LU factorization of ``A``. The return type of ``F`` depends on the type of ``A``. 
+ + ======================= ==================== ======================================== + Type of input ``A`` Type of output ``F`` Relationship between ``F`` and ``A`` + ----------------------- -------------------- ---------------------------------------- + :func:`Matrix` ``LU`` ``F[:L]*F[:U] == A[F[:p], :]`` + :func:`Tridiagonal` ``LUTridiagonal`` N/A + :func:`SparseMatrixCSC` ``UmfpackLU`` ``F[:L]*F[:U] == Rs .* A[F[:p], F[:q]]`` + ======================= ==================== ======================================== + + The individual components of the factorization ``F`` can be accessed by indexing: + + =========== ======================================= ====== ================= ============= + Component Description ``LU`` ``LUTridiagonal`` ``UmfpackLU`` + ----------- --------------------------------------- ------ ----------------- ------------- + ``F[:L]`` ``L`` (lower triangular) part of ``LU`` ✓ ✓ + ``F[:U]`` ``U`` (upper triangular) part of ``LU`` ✓ ✓ + ``F[:p]`` (right) permutation ``Vector`` ✓ ✓ + ``F[:P]`` (right) permutation ``Matrix`` ✓ + ``F[:q]`` left permutation ``Vector`` ✓ + ``F[:Rs]`` ``Vector`` of scaling factors ✓ + ``F[:(:)]`` ``(L,U,p,q,Rs)`` components ✓ + =========== ======================================= ====== ================= ============= + + ================== ====== ================= ============= + Supported function ``LU`` ``LUTridiagonal`` ``UmfpackLU`` + ------------------ ------ ----------------- ------------- + ``/`` ✓ + ``\`` ✓ ✓ ✓ + ``cond`` ✓ ✓ + ``det`` ✓ ✓ + ``size`` ✓ + ================== ====== ================= ============= .. function:: lufact!(A) -> LU From 7f2c22434453a98e3fa8ff1cecdeaa929cd67df5 Mon Sep 17 00:00:00 2001 From: Tony Kelman <tony@kelman.net> Date: Mon, 10 Mar 2014 20:31:32 -0700 Subject: [PATCH 12/27] explicitly return nothing from runtests --- test/testdefs.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/testdefs.jl b/test/testdefs.jl index 5caf7680cf2d0..db5b5da49e141 100644 --- a/test/testdefs.jl +++ b/test/testdefs.jl @@ -3,6 +3,7 @@ using Base.Test function runtests(name) println(" \033[1m*\033[0m \033[31m$(name)\033[0m") Core.include("$name.jl") + nothing end function propagate_errors(a,b) From 213f3d87744bb8b9b1596c96a3612bb276a0b5f7 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 00:08:49 -0400 Subject: [PATCH 13/27] try harder to resolve tangles of module re-exports. fixes #6105 Say module A uses module D and others. It will first try to resolve an identifier using D. If D imports the identifier from A, jl_get_binding_ will return NULL due to a cycle. Before, we considered the identifier undefined at that point. Now we keep looking in subsequent `using` modules. The reason I didn't do this before is that it can cause us to dig deeper into the using list, and possibly find an identifier that should be shadowed by other usings. But this is only a problem if the same identifier is provided by two different usings, which is already ambiguous and deserves more serious treatment. 
--- src/module.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index c87d171607cd7..ae4c49abd78ec 100644 --- a/src/module.c +++ b/src/module.c @@ -141,7 +141,8 @@ static jl_binding_t *jl_get_binding_(jl_module_t *m, jl_sym_t *var, modstack_t * if (b != HT_NOTFOUND && b->exportp) { b = jl_get_binding_(imp, var, &top); if (b == NULL || b->owner == NULL) - return NULL; + // couldn't resolve; try next using (see issue #6105) + continue; // do a full import to prevent the result of this lookup // from changing, for example if this var is assigned to // later. From cf86d64e071288f8655b21275b039cefd845cd5d Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 01:58:32 -0400 Subject: [PATCH 14/27] remove randsym from doc --- doc/stdlib/base.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/stdlib/base.rst b/doc/stdlib/base.rst index cecd2ad5eb500..bfe4fe1d7b2c9 100644 --- a/doc/stdlib/base.rst +++ b/doc/stdlib/base.rst @@ -3443,10 +3443,6 @@ Random number generation in Julia uses the `Mersenne Twister library <http://www Fill the array A with normally-distributed (mean 0, standard deviation 1) random numbers. Also see the rand function. -.. function:: randsym(n) - - Generate a ``nxn`` symmetric array of normally-distributed random numbers with mean 0 and standard deviation 1. - Arrays ------ From 7662f72c04901fb60d34d573f02e18b13df0ee22 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 02:29:08 -0400 Subject: [PATCH 15/27] set CPU_CORES only at startup and not during build. fixes #6102 --- base/client.jl | 1 + base/sysinfo.jl | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/base/client.jl b/base/client.jl index 6c37d9ec092cb..8f5589f887a64 100644 --- a/base/client.jl +++ b/base/client.jl @@ -366,6 +366,7 @@ end function _start() + Sys.init_sysinfo() if CPU_CORES > 8 && !("OPENBLAS_NUM_THREADS" in keys(ENV)) && !("OMP_NUM_THREADS" in keys(ENV)) # Prevent openblas from stating to many threads, unless/until specifically requested ENV["OPENBLAS_NUM_THREADS"] = 8 diff --git a/base/sysinfo.jl b/base/sysinfo.jl index ef847385140c5..08c3cf1bddcba 100644 --- a/base/sysinfo.jl +++ b/base/sysinfo.jl @@ -21,7 +21,7 @@ import ..Base: show, uv_error global CPU_CORES -function __init__() +function init_sysinfo() # set CPU core count global const CPU_CORES = int( haskey(ENV,"JULIA_CPU_CORES") ? From c707ba8bd5c23d367b491bc2a317cd3b2062ce75 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 03:48:01 -0400 Subject: [PATCH 16/27] improve inference of accessing type attributes --- base/inference.jl | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/base/inference.jl b/base/inference.jl index d4ea6764ac0da..68a9ac9c00170 100644 --- a/base/inference.jl +++ b/base/inference.jl @@ -285,6 +285,9 @@ const tupleref_tfunc = function (A, t, i) else types = t end + if !isa(types, Type) + return Any + end T = reduce(tmerge, None, types) if wrapType return isleaftype(T) ? 
Type{T} : Type{TypeVar(:_,T)} @@ -379,14 +382,16 @@ const getfield_tfunc = function (A, s0, name) end if isType(s0) sp = s0.parameters[1] - if fld === :parameters && isleaftype(sp.parameters) - return Type{sp.parameters} - end - if fld === :types && isleaftype(sp.types) - return Type{sp.types} - end - if fld === :super && isleaftype(sp) - return Type{sp.super} + if isa(sp,DataType) && !any(x->isa(x,TypeVar), sp.parameters) + if fld === :parameters + return Type{sp.parameters} + end + if fld === :types + return Type{sp.types} + end + if fld === :super + return Type{sp.super} + end end end for i=1:length(s.names) From 0c8f09b651df86ac77ec156f6bee32fe047e1fd4 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 03:48:21 -0400 Subject: [PATCH 17/27] fix dot operator deprecations in benchmarks --- test/perf/kernel/bench_eu.jl | 6 +++--- test/perf/kernel/indexing.jl | 2 +- test/perf/kernel/perf.jl | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/perf/kernel/bench_eu.jl b/test/perf/kernel/bench_eu.jl index ad1e15b3de0d7..cdf726d445d78 100644 --- a/test/perf/kernel/bench_eu.jl +++ b/test/perf/kernel/bench_eu.jl @@ -19,7 +19,7 @@ function bench_eu_devec(numPaths) end end - V = mean( exp(-r*T)*max(K-S,0) ) + V = mean( exp(-r*T)*max(K.-S,0) ) end function bench_eu_vec(numPaths) @@ -35,8 +35,8 @@ function bench_eu_vec(numPaths) t1 = (r-0.5*sigma.^2)*dt t2 = sigma*sqrt(dt) for i=1:steps - S = S .* exp(t1+t2*randn(numPaths)) + S = S .* exp(t1.+t2*randn(numPaths)) end - V = mean( exp(-r*T)*max(K-S,0) ) + V = mean( exp(-r*T)*max(K.-S,0) ) end diff --git a/test/perf/kernel/indexing.jl b/test/perf/kernel/indexing.jl index 9ed07e5adcb3d..3b0e8903073c6 100644 --- a/test/perf/kernel/indexing.jl +++ b/test/perf/kernel/indexing.jl @@ -1,5 +1,5 @@ function add1!(x,y) - x[y] += 1 + x[y] .+= 1 end function devec_add1!(x,y) diff --git a/test/perf/kernel/perf.jl b/test/perf/kernel/perf.jl index d4144764da315..f09c81dd60984 100644 --- a/test/perf/kernel/perf.jl +++ b/test/perf/kernel/perf.jl @@ -83,7 +83,7 @@ x = randn(200_000) @timeit (for n in 1:10; count = cmp_with_func(x, isless) end) "funarg" "Function argument benchmark" -arith_vectorized(b,c,d) = b.*c + d + 1.0 +arith_vectorized(b,c,d) = b.*c + d .+ 1.0 len = 1_000_000 b = randn(len) From 010ad00f48b0b8974e839fd205e8f3662af78bc0 Mon Sep 17 00:00:00 2001 From: Jiahao Chen <jiahao@mit.edu> Date: Mon, 10 Mar 2014 19:21:22 -0400 Subject: [PATCH 18/27] Better crossreferencing of `full` in doc --- doc/stdlib/linalg.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst index 6d5e2f9867d2c..e549cc5c6d2b2 100644 --- a/doc/stdlib/linalg.rst +++ b/doc/stdlib/linalg.rst @@ -107,7 +107,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f Computes the QR factorization of ``A`` and returns either a ``QR`` type if ``pivot=false`` or ``QRPivoted`` type if ``pivot=true``. From a ``QR`` or ``QRPivoted`` factorization ``F``, an orthogonal matrix ``F[:Q]`` and a triangular matrix ``F[:R]`` can be extracted. For ``QRPivoted`` it is also posiible to extract the permutation vector ``F[:p]`` or matrix ``F[:P]``. The following functions are available for the ``QR`` objects: ``size``, ``\``. When ``A`` is rectangular ``\`` will return a least squares solution and if the soultion is not unique, the one with smallest norm is returned. 
- The orthogonal matrix ``Q=F[:Q]`` is a ``QRPackedQ`` type when ``F`` is a ``QR`` and a ``QRPivotedQ`` then ``F`` is a ``QRPivoted``. Both have the ``*`` operator overloaded to support efficient multiplication by ``Q`` and ``Q'``. Multiplication with respect to either thin or full ``Q`` is allowed, i.e. both ``F[:Q]*F[:R]`` and ``F[:Q]*A`` are supported. A ``Q`` matrix can be converted into a regular matrix with ``full`` which has a named argument ``thin``. + The orthogonal matrix ``Q=F[:Q]`` is a ``QRPackedQ`` type when ``F`` is a ``QR`` and a ``QRPivotedQ`` then ``F`` is a ``QRPivoted``. Both have the ``*`` operator overloaded to support efficient multiplication by ``Q`` and ``Q'``. Multiplication with respect to either thin or full ``Q`` is allowed, i.e. both ``F[:Q]*F[:R]`` and ``F[:Q]*A`` are supported. A ``Q`` matrix can be converted into a regular matrix with :func:`full` which has a named argument ``thin``. .. function:: qrfact!(A,[pivot=false]) @@ -170,7 +170,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f .. function:: hessfact(A) - Compute the Hessenberg decomposition of ``A`` and return a ``Hessenberg`` object. If ``F`` is the factorization object, the unitary matrix can be accessed with ``F[:Q]`` and the Hessenberg matrix with ``F[:H]``. When ``Q`` is extracted, the resulting type is the ``HessenbergQ`` object, and may be converted to a regular matrix with ``full``. + Compute the Hessenberg decomposition of ``A`` and return a ``Hessenberg`` object. If ``F`` is the factorization object, the unitary matrix can be accessed with ``F[:Q]`` and the Hessenberg matrix with ``F[:H]``. When ``Q`` is extracted, the resulting type is the ``HessenbergQ`` object, and may be converted to a regular matrix with :func:`full`. .. function:: hessfact!(A) @@ -281,16 +281,16 @@ Linear algebra functions in Julia are largely implemented by calling functions f .. function:: Tridiagonal(dl, d, du) - Construct a tridiagonal matrix from the lower diagonal, diagonal, and upper diagonal, respectively. The result is of type ``Tridiagonal`` and provides efficient specialized linear solvers, but may be converted into a regular matrix with ``full``. + Construct a tridiagonal matrix from the lower diagonal, diagonal, and upper diagonal, respectively. The result is of type ``Tridiagonal`` and provides efficient specialized linear solvers, but may be converted into a regular matrix with :func:`full`. .. function:: Bidiagonal(dv, ev, isupper) Constructs an upper (``isupper=true``) or lower (``isupper=false``) bidiagonal matrix - using the given diagonal (``dv``) and off-diagonal (``ev``) vectors. The result is of type ``Bidiagonal`` and provides efficient specialized linear solvers, but may be converted into a regular matrix with ``full``. + using the given diagonal (``dv``) and off-diagonal (``ev``) vectors. The result is of type ``Bidiagonal`` and provides efficient specialized linear solvers, but may be converted into a regular matrix with :func:`full`. .. function:: SymTridiagonal(d, du) - Construct a real symmetric tridiagonal matrix from the diagonal and upper diagonal, respectively. The result is of type ``SymTridiagonal`` and provides efficient specialized eigensolvers, but may be converted into a regular matrix with ``full``. + Construct a real symmetric tridiagonal matrix from the diagonal and upper diagonal, respectively. 
The result is of type ``SymTridiagonal`` and provides efficient specialized eigensolvers, but may be converted into a regular matrix with :func:`full`. .. function:: Woodbury(A, U, C, V) From d114e11c93c416d35da059d4b6b5f6799628dd79 Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" <stevenj@mit.edu> Date: Mon, 10 Mar 2014 17:49:06 -0400 Subject: [PATCH 19/27] more type-stable reductions (fix #6069) --- base/reduce.jl | 141 +++++++++++++++++++++++++++++++---------------- test/arrayops.jl | 29 ++++++++++ 2 files changed, 123 insertions(+), 47 deletions(-) diff --git a/base/reduce.jl b/base/reduce.jl index af19f92a92504..48577c45f04a8 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -2,6 +2,9 @@ ###### higher level reduction functions ###### +# Note that getting type-stable results from reduction functions, +# or at least having type-stable loops, is nontrivial (#6069). + # reduce function reduce(op::Callable, itr) # this is a left fold @@ -19,17 +22,24 @@ function reduce(op::Callable, itr) # this is a left fold return op() # empty collection end (v, s) = next(itr, s) - while !done(itr, s) + if done(itr, s) + return v + else # specialize for length > 1 to have a hopefully type-stable loop (x, s) = next(itr, s) - v = op(v,x) + result = op(v, x) + while !done(itr, s) + (x, s) = next(itr, s) + result = op(result, x) + end + return result end - return v end +# pairwise reduction, requires n > 1 (to allow type-stable loop) function r_pairwise(op::Callable, A::AbstractArray, i1,n) if n < 128 - @inbounds v = A[i1] - for i = i1+1:i1+n-1 + @inbounds v = op(A[i1], A[i1+1]) + for i = i1+2:i1+n-1 @inbounds v = op(v,A[i]) end return v @@ -41,12 +51,12 @@ end function reduce(op::Callable, A::AbstractArray) n = length(A) - n == 0 ? op() : r_pairwise(op,A, 1,n) + n == 0 ? op() : n == 1 ? A[1] : r_pairwise(op,A, 1,n) end function reduce(op::Callable, v0, A::AbstractArray) n = length(A) - n == 0 ? v0 : op(v0, r_pairwise(op,A, 1,n)) + n == 0 ? v0 : n == 1 ? op(v0, A[1]) : op(v0, r_pairwise(op,A, 1,n)) end function reduce(op::Callable, v0, itr) @@ -169,46 +179,53 @@ end function sum(itr) s = start(itr) if done(itr, s) - return 0 + if method_exists(eltype, (typeof(itr),)) + T = eltype(itr) + return zero(T) + zero(T) + else + throw(ArgumentError("sum(itr) is undefined for empty collections; instead, do isempty(itr) ? z : sum(itr), where z is the correct type of zero for your sum")) + end end (v, s) = next(itr, s) + done(itr, s) && return v + zero(v) # adding zero for type stability + # specialize for length > 1 to have type-stable loop + (x, s) = next(itr, s) + result = v + x while !done(itr, s) (x, s) = next(itr, s) - v += x + result += x end - return v + return result end sum(A::AbstractArray{Bool}) = countnz(A) -# a fast implementation of sum in sequential order (from left to right) +# a fast implementation of sum in sequential order (from left to right). +# to allow type-stable loops, requires length > 1 function sum_seq{T}(a::AbstractArray{T}, ifirst::Int, ilast::Int) - - @inbounds if ifirst + 3 >= ilast # a has at most four elements - if ifirst > ilast - return zero(T) - else - i = ifirst - s = a[i] - while i < ilast - s += a[i+=1] - end - return s + + @inbounds if ifirst + 6 >= ilast # length(a) < 8 + i = ifirst + s = a[i] + a[i+1] + i = i+1 + while i < ilast + s += a[i+=1] end + return s - else # a has more than four elements + else # length(a) >= 8 # more effective utilization of the instruction # pipeline through manually unrolling the sum # into four-way accumulation. 
Benchmark shows # that this results in about 2x speed-up. - s1 = a[ifirst] - s2 = a[ifirst + 1] - s3 = a[ifirst + 2] - s4 = a[ifirst + 3] + s1 = a[ifirst] + a[ifirst + 4] + s2 = a[ifirst + 1] + a[ifirst + 5] + s3 = a[ifirst + 2] + a[ifirst + 6] + s4 = a[ifirst + 3] + a[ifirst + 7] - i = ifirst + 4 + i = ifirst + 8 il = ilast - 3 while i <= il s1 += a[i] @@ -243,6 +260,7 @@ end # Note: sum_seq uses four accumulators, so each accumulator gets at most 256 numbers const PAIRWISE_SUM_BLOCKSIZE = 1024 +# note: requires length > 1, due to sum_seq function sum_pairwise(a::AbstractArray, ifirst::Int, ilast::Int) # bsiz: maximum block size @@ -254,18 +272,29 @@ function sum_pairwise(a::AbstractArray, ifirst::Int, ilast::Int) end end -sum(a::AbstractArray) = sum_pairwise(a, 1, length(a)) -sum{T<:Integer}(a::AbstractArray{T}) = sum_seq(a, 1, length(a)) +function sum{T}(a::AbstractArray{T}) + n = length(a) + n == 0 && return zero(T) + zero(T) + n == 1 && return a[1] + zero(a[1]) + sum_pairwise(a, 1, length(a)) +end + +function sum{T<:Integer}(a::AbstractArray{T}) + n = length(a) + n == 0 && return zero(T) + zero(T) + n == 1 && return a[1] + zero(a[1]) + sum_seq(a, 1, length(a)) +end # Kahan (compensated) summation: O(1) error growth, at the expense # of a considerable increase in computational expense. function sum_kbn{T<:FloatingPoint}(A::AbstractArray{T}) n = length(A) if (n == 0) - return zero(T) + return zero(T)+zero(T) end - s = A[1] - c = zero(T) + s = A[1]+zero(T) + c = zero(T)+zero(T) for i in 2:n Ai = A[i] t = s + Ai @@ -286,14 +315,23 @@ end function prod(itr) s = start(itr) if done(itr, s) - return *() + if method_exists(eltype, (typeof(itr),)) + T = eltype(itr) + return one(T) * one(T) + else + throw(ArgumentError("prod(itr) is undefined for empty collections; instead, do isempty(itr) ? 
o : prod(itr), where o is the correct type of identity for your product")) + end end (v, s) = next(itr, s) + done(itr, s) && return v * one(v) # multiplying by one for type stability + # specialize for length > 1 to have type-stable loop + (x, s) = next(itr, s) + result = v * x while !done(itr, s) (x, s) = next(itr, s) - v = v*x + result *= x end - return v + return result end prod(A::AbstractArray{Bool}) = @@ -301,14 +339,16 @@ prod(A::AbstractArray{Bool}) = function prod_rgn{T}(A::AbstractArray{T}, first::Int, last::Int) if first > last - return one(T) + return one(T) * one(T) end i = first - v = A[i] + @inbounds v = A[i] + i == last && return v * one(v) + @inbounds result = v * A[i+=1] while i < last - @inbounds v *= A[i+=1] + @inbounds result *= A[i+=1] end - return v + return result end prod{T}(A::AbstractArray{T}) = prod_rgn(A, 1, length(A)) @@ -467,11 +507,17 @@ function mapreduce(f::Callable, op::Callable, itr) end (x, s) = next(itr, s) v = f(x) - while !done(itr, s) + if done(itr, s) + return v + else # specialize for length > 1 to have a hopefully type-stable loop (x, s) = next(itr, s) - v = op(v,f(x)) + result = op(v, f(x)) + while !done(itr, s) + (x, s) = next(itr, s) + result = op(result, f(x)) + end + return result end - return v end function mapreduce(f::Callable, op::Callable, v0, itr) @@ -482,10 +528,11 @@ function mapreduce(f::Callable, op::Callable, v0, itr) return v end +# pairwise reduction, requires n > 1 (to allow type-stable loop) function mr_pairwise(f::Callable, op::Callable, A::AbstractArray, i1,n) if n < 128 - @inbounds v = f(A[i1]) - for i = i1+1:i1+n-1 + @inbounds v = op(f(A[i1]), f(A[i1+1])) + for i = i1+2:i1+n-1 @inbounds v = op(v,f(A[i])) end return v @@ -496,11 +543,11 @@ function mr_pairwise(f::Callable, op::Callable, A::AbstractArray, i1,n) end function mapreduce(f::Callable, op::Callable, A::AbstractArray) n = length(A) - n == 0 ? op() : mr_pairwise(f,op,A, 1,n) + n == 0 ? op() : n == 1 ? f(A[1]) : mr_pairwise(f,op,A, 1,n) end function mapreduce(f::Callable, op::Callable, v0, A::AbstractArray) n = length(A) - n == 0 ? v0 : op(v0, mr_pairwise(f,op,A, 1,n)) + n == 0 ? v0 : n == 1 ? 
op(v0, f(A[1])) : op(v0, mr_pairwise(f,op,A, 1,n)) end # specific mapreduce functions diff --git a/test/arrayops.jl b/test/arrayops.jl index 08cf04dfd82bc..7c925cf663428 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -367,6 +367,35 @@ end @test sum(z) == sum(z,(1,2,3,4))[1] == 136 +# check variants of summation for type-stability and other issues (#6069) +sum2(itr) = invoke(sum, (Any,), itr) +plus(x,y) = x + y +plus() = 0 +sum3(A) = reduce(plus, A) +sum4(itr) = invoke(reduce, (Function, Any), plus, itr) +sum5(A) = reduce(plus, 0, A) +sum6(itr) = invoke(reduce, (Function, Int, Any), plus, 0, itr) +sum7(A) = mapreduce(x->x, plus, A) +sum8(itr) = invoke(mapreduce, (Function, Function, Any), x->x, plus, itr) +sum9(A) = mapreduce(x->x, plus, 0, A) +sum10(itr) = invoke(mapreduce, (Function, Function, Int, Any), x->x,plus,0,itr) +for f in (sum2, sum3, sum4, sum5, sum6, sum7, sum8, sum9, sum10) + @test sum(z) == f(z) + @test sum(Int[]) == f(Int[]) == 0 + @test sum(Int[7]) == f(Int[7]) == 7 + if f == sum3 || f == sum4 || f == sum7 || f == sum8 + @test typeof(f(Int8[])) == typeof(f(Int8[1 7])) + else + @test typeof(f(Int8[])) == typeof(f(Int8[1])) == typeof(f(Int8[1 7])) + end +end +@test typeof(sum(Int8[])) == typeof(sum(Int8[1])) == typeof(sum(Int8[1 7])) + +prod2(itr) = invoke(prod, (Any,), itr) +@test prod(Int[]) == prod2(Int[]) == 1 +@test prod(Int[7]) == prod2(Int[7]) == 7 +@test typeof(prod(Int8[])) == typeof(prod(Int8[1])) == typeof(prod(Int8[1 7])) == typeof(prod2(Int8[])) == typeof(prod2(Int8[1])) == typeof(prod2(Int8[1 7])) + v = cell(2,2,1,1) v[1,1,1,1] = 28.0 v[1,2,1,1] = 36.0 From 83ccf97eea7c1cc20ed6b2c4e83a128bfd5fd787 Mon Sep 17 00:00:00 2001 From: Stefan Karpinski <stefan@karpinski.org> Date: Tue, 11 Mar 2014 12:49:01 -0400 Subject: [PATCH 20/27] remove the word "dependent" [#6113] --- doc/manual/types.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/types.rst b/doc/manual/types.rst index 60902a02b5006..6164c2bb1a3c1 100644 --- a/doc/manual/types.rst +++ b/doc/manual/types.rst @@ -36,7 +36,7 @@ perhaps somewhat counterintuitively, often significantly simplify them. Describing Julia in the lingo of `type systems <http://en.wikipedia.org/wiki/Type_system>`_, it is: dynamic, -nominative, parametric and dependent. Generic types can be parameterized, +nominative and parametric. Generic types can be parameterized, and the hierarchical relationships between types are explicitly declared, rather than implied by compatible structure. 
One particularly distinctive feature of Julia's type system From ff2f6a100ba9a5d2ce4a984ff539a11cbf71e3c4 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 13:52:57 -0400 Subject: [PATCH 21/27] don't alloca more than one page in apply() --- src/builtins.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/builtins.c b/src/builtins.c index 4e62a4f356f7f..9e1a12018fb23 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -241,6 +241,7 @@ JL_CALLABLE(jl_f_typeassert) } static jl_function_t *jl_append_any_func; +extern size_t jl_page_size; JL_CALLABLE(jl_f_apply) { @@ -295,7 +296,7 @@ JL_CALLABLE(jl_f_apply) return result; } } - if (n > 64000) { + if (n > jl_page_size/sizeof(jl_value_t*)) { // put arguments on the heap if there are too many argarr = (jl_value_t*)jl_alloc_cell_1d(n); newargs = jl_cell_data(argarr); From 24df4b588cf274d478e8c2d4823ccf39bc4c0921 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson <jeff.bezanson@gmail.com> Date: Tue, 11 Mar 2014 15:59:53 -0400 Subject: [PATCH 22/27] fix problem introduced by 764098f251a8e6bdccaae42bd8386f596757d2ee when we try to box the result of a bottoming (None) expression --- src/cgutils.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cgutils.cpp b/src/cgutils.cpp index a5389a0ac88d8..cf50e5fd40d35 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -1454,7 +1454,7 @@ static jl_value_t *static_constant_instance(Constant *constant, jl_value_t *jt) // this is used to wrap values for generic contexts, where a // dynamically-typed value is required (e.g. argument to unknown function). // if it's already a pointer it's left alone. -static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) +static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) { Type *t = (v == NULL) ? NULL : v->getType(); @@ -1469,7 +1469,7 @@ static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) jt = jt2; } - if (v == NULL || dyn_cast<UndefValue>(v) != 0 || t == NoopType) { + if (jt == jl_bottom_type || v == NULL || dyn_cast<UndefValue>(v) != 0 || t == NoopType) { jl_value_t *s = static_void_instance(jt); if (jl_is_tuple(jt) && jl_tuple_len(jt) > 0) jl_add_linfo_root(ctx->linfo, s); From c4e92fff5ab6657e5d58292c0b8bc67946f97a55 Mon Sep 17 00:00:00 2001 From: Jiahao Chen <jiahao@mit.edu> Date: Tue, 11 Mar 2014 16:00:15 -0400 Subject: [PATCH 23/27] Update doc of qrfact At long last, the documentation for the QR factorization (qrfact) is now in line with reality. - Explains which of QR, QRCompactWY and QRPivoted is returned depending on the inputs - Explains what QRPackedQ and QRCompactWYQ are and what you can use them for - Some technical notes on the structure of all these objects and why we have so many of them --- doc/stdlib/linalg.rst | 54 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst index e549cc5c6d2b2..2ed2edfc1d8cd 100644 --- a/doc/stdlib/linalg.rst +++ b/doc/stdlib/linalg.rst @@ -103,11 +103,57 @@ Linear algebra functions in Julia are largely implemented by calling functions f Compute the (pivoted) QR factorization of ``A`` such that either ``A = Q*R`` or ``A[:,p] = Q*R``. Also see ``qrfact``. The default is to compute a thin factorization. Note that ``R`` is not extended with zeros when the full ``Q`` is requested. -.. function:: qrfact(A,[pivot=false]) +.. 
 
-   Computes the QR factorization of ``A`` and returns either a ``QR`` type if ``pivot=false`` or ``QRPivoted`` type if ``pivot=true``. From a ``QR`` or ``QRPivoted`` factorization ``F``, an orthogonal matrix ``F[:Q]`` and a triangular matrix ``F[:R]`` can be extracted. For ``QRPivoted`` it is also posiible to extract the permutation vector ``F[:p]`` or matrix ``F[:P]``.
-   The following functions are available for the ``QR`` objects: ``size``, ``\``. When ``A`` is rectangular ``\`` will return a least squares solution and if the soultion is not unique, the one with smallest norm is returned.
-   The orthogonal matrix ``Q=F[:Q]`` is a ``QRPackedQ`` type when ``F`` is a ``QR`` and a ``QRPivotedQ`` then ``F`` is a ``QRPivoted``. Both have the ``*`` operator overloaded to support efficient multiplication by ``Q`` and ``Q'``. Multiplication with respect to either thin or full ``Q`` is allowed, i.e. both ``F[:Q]*F[:R]`` and ``F[:Q]*A`` are supported. A ``Q`` matrix can be converted into a regular matrix with :func:`full` which has a named argument ``thin``.
+   Computes the QR factorization of ``A``. The return type of ``F`` depends on the element type of ``A`` and whether pivoting is specified (with ``pivot=true``).
+
+   ================ ================= ========= =====================================
+   Return type      ``eltype(A)``     ``pivot`` Relationship between ``F`` and ``A``
+   ---------------- ----------------- --------- -------------------------------------
+   ``QR``           not ``BlasFloat`` either    ``A==F[:Q]*F[:R]``
+   ``QRCompactWY``  ``BlasFloat``     ``false`` ``A==F[:Q]*F[:R]``
+   ``QRPivoted``    ``BlasFloat``     ``true``  ``A[:,F[:p]]==F[:Q]*F[:R]``
+   ================ ================= ========= =====================================
+
+   ``BlasFloat`` refers to any of: ``Float32``, ``Float64``, ``Complex64`` or ``Complex128``.
+
+   The individual components of the factorization ``F`` can be accessed by indexing:
+
+   =========== ============================================= ================== ===================== ==================
+   Component   Description                                   ``QR``             ``QRCompactWY``       ``QRPivoted``
+   ----------- --------------------------------------------- ------------------ --------------------- ------------------
+   ``F[:Q]``   ``Q`` (orthogonal/unitary) part of ``QR``     ✓ (``QRPackedQ``)  ✓ (``QRCompactWYQ``)  ✓ (``QRPackedQ``)
+   ``F[:R]``   ``R`` (upper right triangular) part of ``QR`` ✓                  ✓                     ✓
+   ``F[:p]``   pivot ``Vector``                                                                        ✓
+   ``F[:P]``   (pivot) permutation ``Matrix``                                                          ✓
+   =========== ============================================= ================== ===================== ==================
+
+   The following functions are available for the ``QR`` objects: ``size``, ``\``. When ``A`` is rectangular, ``\`` will return a least squares solution and if the solution is not unique, the one with smallest norm is returned.
+
+   Multiplication with respect to either thin or full ``Q`` is allowed, i.e. both ``F[:Q]*F[:R]`` and ``F[:Q]*A`` are supported. A ``Q`` matrix can be converted into a regular matrix with :func:`full` which has a named argument ``thin``.
+
+   .. note::
+
+      ``qrfact`` returns multiple types because LAPACK uses several representations that minimize the memory storage requirements of products of Householder elementary reflectors, so that the ``Q`` and ``R`` matrices can be stored compactly rather than as two separate dense matrices.
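A minimal usage sketch (assuming a 0.3-era Julia session; the array values are arbitrary and the resulting types follow the table above)::

   A = randn(5, 3)
   F = qrfact(A)            # Float64 input with default pivot=false, so a QRCompactWY
   Q, R = F[:Q], F[:R]
   norm(A - Q*R)            # should be ~0, up to roundoff
   full(Q, thin=false)      # materialize the full 5x5 Q as an ordinary Matrix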
+
+      The data contained in ``QR`` or ``QRPivoted`` can be used to construct the ``QRPackedQ`` type, which is a compact representation of the rotation matrix:
+
+      .. math::
+
+         Q = \prod_{i=1}^{\min(m,n)} (I - \tau_i v_i v_i^T)
+
+      where :math:`\tau_i` is the scale factor and :math:`v_i` is the projection vector associated with the :math:`i^{th}` Householder elementary reflector.
+
+      The data contained in ``QRCompactWY`` can be used to construct the ``QRCompactWYQ`` type, which is a compact representation of the rotation matrix
+
+      .. math::
+
+         Q = I + Y T Y^T
+
+      where ``Y`` is :math:`m \times r` lower trapezoidal and ``T`` is :math:`r \times r` upper triangular. The *compact WY* representation [Schreiber1989]_ is not to be confused with the older, *WY* representation [Bischof1987]_. (The LAPACK documentation uses ``V`` in lieu of ``Y``.)
+
+   .. [Bischof1987] C Bischof and C Van Loan, The WY representation for products of Householder matrices, SIAM J Sci Stat Comput 8 (1987), s2-s13. doi:10.1137/0908009
+   .. [Schreiber1989] R Schreiber and C Van Loan, A storage-efficient WY representation for products of Householder transformations, SIAM J Sci Stat Comput 10 (1989), 53-57. doi:10.1137/0910005
 
 .. function:: qrfact!(A,[pivot=false])

From 09d9e34417ccc228339eafb5f4b9cd829a9c6c21 Mon Sep 17 00:00:00 2001
From: Jeff Bezanson <jeff.bezanson@gmail.com>
Date: Tue, 11 Mar 2014 16:12:49 -0400
Subject: [PATCH 24/27] fix bug in t-function for isdefined()

---
 base/inference.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/base/inference.jl b/base/inference.jl
index 68a9ac9c00170..134ca2958da4d 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -123,7 +123,7 @@ t_func[eval(Core.Intrinsics,:select_value)] =
 t_func[is] = (2, 2, cmp_tfunc)
 t_func[issubtype] = (2, 2, cmp_tfunc)
 t_func[isa] = (2, 2, cmp_tfunc)
-t_func[isdefined] = (1, 2, (args...)->Bool)
+t_func[isdefined] = (1, Inf, (args...)->Bool)
 t_func[Union] = (0, Inf,
                  (args...)->(if all(isType,args)
                                  Type{Union(map(t->t.parameters[1],args)...)}

From e092d66f193aba112e9f97f0dd9ba187afe5fffb Mon Sep 17 00:00:00 2001
From: Dahua Lin <lindahua@gmail.com>
Date: Tue, 11 Mar 2014 15:19:52 -0500
Subject: [PATCH 25/27] add typealias for DenseVector & DenseMatrix

---
 base/array.jl   | 4 ++++
 base/exports.jl | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/base/array.jl b/base/array.jl
index 5b5bb342ed3d0..42dc231971e16 100644
--- a/base/array.jl
+++ b/base/array.jl
@@ -4,6 +4,10 @@ typealias Vector{T} Array{T,1}
 typealias Matrix{T} Array{T,2}
 typealias VecOrMat{T} Union(Vector{T}, Matrix{T})
 
+typealias DenseVector{T} DenseArray{T,1}
+typealias DenseMatrix{T} DenseArray{T,2}
+typealias DenseVecOrMat{T} Union(DenseVector{T}, DenseMatrix{T})
+
 typealias StoredVector{T} StoredArray{T,1}
 typealias StridedArray{T,N,A<:DenseArray} Union(DenseArray{T,N}, SubArray{T,N,A})
 typealias StridedVector{T,A<:DenseArray} Union(DenseArray{T,1}, SubArray{T,1,A})
diff --git a/base/exports.jl b/base/exports.jl
index e72dbff4b3a8c..198f424c9ce6a 100644
--- a/base/exports.jl
+++ b/base/exports.jl
@@ -36,6 +36,9 @@ export
     Complex64,
     Complex32,
     DArray,
+    DenseMatrix,
+    DenseVecOrMat,
+    DenseVector,
     DevNull,
     Diagonal,
     Dict,

From 9718fcf19e8143e7f8e933aa110493a023d8c345 Mon Sep 17 00:00:00 2001
From: Jeff Bezanson <jeff.bezanson@gmail.com>
Date: Tue, 11 Mar 2014 18:46:59 -0400
Subject: [PATCH 26/27] fix regression in inlining of some apply() calls

---
 base/inference.jl | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/base/inference.jl b/base/inference.jl
index 134ca2958da4d..7ee4313beb6a9 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -1768,7 +1768,11 @@ function exprtype(x::ANY)
         end
         return abstract_eval(x, (), sv)
     elseif isa(x,QuoteNode)
-        return typeof(x.value)
+        v = x.value
+        if isa(v,Type)
+            return Type{v}
+        end
+        return typeof(v)
     elseif isa(x,Type)
         return Type{x}
     elseif isa(x,LambdaStaticData)
@@ -2154,6 +2158,8 @@ function inlining_pass(e::Expr, sv, ast)
             if isa(aarg,Expr) && is_known_call(aarg, tuple, sv)
                 # apply(f,tuple(x,y,...)) => f(x,y,...)
                 newargs[i-2] = aarg.args[2:end]
+            elseif isa(aarg, Tuple)
+                newargs[i-2] = { QuoteNode(x) for x in aarg }
             elseif isa(t,Tuple) && !isvatuple(t) && effect_free(aarg,sv)
                 # apply(f,t::(x,y)) => f(t[1],t[2])
                 newargs[i-2] = { mk_tupleref(aarg,j) for j=1:length(t) }

From bbbaf8885b988e2ff5ea0489dd1606d523ddf228 Mon Sep 17 00:00:00 2001
From: Jeff Bezanson <jeff.bezanson@gmail.com>
Date: Tue, 11 Mar 2014 19:28:14 -0400
Subject: [PATCH 27/27] synchronize printing of QuoteNode and :quote exprs. ref #6104

---
 base/show.jl | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/base/show.jl b/base/show.jl
index e6c05a28a6259..7f67aff0f9c1d 100644
--- a/base/show.jl
+++ b/base/show.jl
@@ -339,13 +339,16 @@ function show_unquoted(io::IO, ex::SymbolNode, ::Int, ::Int)
     show_expr_type(io, ex.typ)
 end
 
-function show_unquoted(io::IO, ex::QuoteNode, indent::Int, prec::Int)
-    if isa(ex.value, Symbol) && !(ex.value in quoted_syms)
+show_unquoted(io::IO, ex::QuoteNode, indent::Int, prec::Int) =
+    show_unquoted_quote_expr(io, ex.value, indent, prec)
+
+function show_unquoted_quote_expr(io::IO, value, indent::Int, prec::Int)
+    if isa(value, Symbol) && !(value in quoted_syms)
         print(io, ":")
-        print(io, ex.value)
+        print(io, value)
     else
         print(io, ":(")
-        show_unquoted(io, ex.value, indent+indent_width, 0)
+        show_unquoted(io, value, indent+indent_width, 0)
         print(io, ")")
     end
 end
@@ -496,7 +499,7 @@ function show_unquoted(io::IO, ex::Expr, indent::Int, prec::Int)
     elseif is(head, :block) || is(head, :body)
         show_block(io, "begin", ex, indent); print(io, "end")
     elseif is(head, :quote) && nargs == 1
-        show_unquoted(io, args[1], indent)
+        show_unquoted_quote_expr(io, args[1], indent, 0)
     elseif is(head, :gotoifnot) && nargs == 2
         print(io, "unless ")
         show_list(io, args, " goto ", indent)
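As a rough sketch of the intended effect of the last patch (not captured output; ``show_unquoted`` and ``show_unquoted_quote_expr`` are internal helpers in base/show.jl, reached here via ``Base.``), a QuoteNode and an equivalent ``:quote`` Expr should now render the same way, since both paths go through ``show_unquoted_quote_expr``::

    # Sketch only: both quoted forms are expected to print identically (e.g. as :x).
    qn = QuoteNode(:x)
    qe = Expr(:quote, :x)
    Base.show_unquoted(STDOUT, qn, 0, 0)   # expected: :x
    println()
    Base.show_unquoted(STDOUT, qe, 0, 0)   # expected: :x as well
    println()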