Merge pull request #19989 from ararslan/aa/array-syntax
Deprecate Array(T, dims...)
ararslan authored Jan 14, 2017
2 parents 8afb74d + 36122a7 commit 50d0d5d
Showing 14 changed files with 117 additions and 105 deletions.
9 changes: 9 additions & 0 deletions base/array.jl
@@ -25,6 +25,15 @@ typealias DenseVecOrMat{T} Union{DenseVector{T}, DenseMatrix{T}}

import Core: arraysize, arrayset, arrayref

"""
    Array{T,N}(dims)

Construct an uninitialized `N`-dimensional dense array with element type `T`. `dims` may
be a tuple or a series of integer arguments corresponding to the length in each dimension.
If the rank `N` is omitted, i.e. `Array{T}(dims)`, the rank is determined based on `dims`.
"""
Array

vect() = Array{Any,1}(0)
vect{T}(X::T...) = T[ X[i] for i=1:length(X) ]

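For context, a minimal sketch of the constructor forms the new docstring describes, assuming the Julia 0.6-era syntax this changeset targets (the last call shows the form being deprecated):

```julia
# Rank given explicitly: an uninitialized 3×4 Matrix{Float64}.
A = Array{Float64,2}(3, 4)

# Rank inferred from the dims: a 3×4 Matrix{Float64} and a length-5 Vector{Int}.
B = Array{Float64}((3, 4))
v = Array{Int}(5)

# Deprecated by this commit: still works during the 0.6 cycle, but warns.
C = Array(Float64, 3, 4)
```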
2 changes: 1 addition & 1 deletion base/asyncmap.jl
@@ -246,7 +246,7 @@ end

# Special handling for some types.
function asyncmap(f, s::AbstractString; kwargs...)
s2=Array(Char, length(s))
s2 = Array{Char,1}(length(s))
asyncmap!(f, s2, s; kwargs...)
return convert(String, s2)
end
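A tiny usage sketch of the method touched above (illustrative only; `uppercase` stands in for any per-character function):

```julia
# Maps f over each Char concurrently, then converts the Char buffer back to a String.
asyncmap(uppercase, "hello")   # "HELLO"
```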
7 changes: 0 additions & 7 deletions base/boot.jl
@@ -330,13 +330,6 @@ typealias NTuple{N,T} Tuple{Vararg{T,N}}
(::Type{Array{T,1}}){T}() = Array{T,1}(0)
(::Type{Array{T,2}}){T}() = Array{T,2}(0, 0)

# TODO: possibly turn these into deprecations
Array{T,N}(::Type{T}, d::NTuple{N,Int}) = Array{T,N}(d)
Array{T}(::Type{T}, d::Int...) = Array(T, d)
Array{T}(::Type{T}, m::Int) = Array{T,1}(m)
Array{T}(::Type{T}, m::Int,n::Int) = Array{T,2}(m,n)
Array{T}(::Type{T}, m::Int,n::Int,o::Int) = Array{T,3}(m,n,o)

# primitive Symbol constructors
function Symbol(s::String)
return ccall(:jl_symbol_n, Ref{Symbol}, (Ptr{UInt8}, Int),
19 changes: 19 additions & 0 deletions base/deprecated.jl
@@ -1741,4 +1741,23 @@ end
export @test_approx_eq
# END code from base/test.jl

# Deprecate Array(T, dims...) in favor of proper type constructors
@deprecate Array{T,N}(::Type{T}, d::NTuple{N,Int}) Array{T,N}(d)
@deprecate Array{T}(::Type{T}, d::Int...) Array{T,length(d)}(d...)
@deprecate Array{T}(::Type{T}, m::Int) Array{T,1}(m)
@deprecate Array{T}(::Type{T}, m::Int,n::Int) Array{T,2}(m,n)
@deprecate Array{T}(::Type{T}, m::Int,n::Int,o::Int) Array{T,3}(m,n,o)
@deprecate Array{T}(::Type{T}, d::Integer...) Array{T,length(d)}(convert(Tuple{Vararg{Int}}, d))
@deprecate Array{T}(::Type{T}, m::Integer) Array{T,1}(Int(m))
@deprecate Array{T}(::Type{T}, m::Integer,n::Integer) Array{T,2}(Int(m),Int(n))
@deprecate Array{T}(::Type{T}, m::Integer,n::Integer,o::Integer) Array{T,3}(Int(m),Int(n),Int(o))

# Likewise for SharedArrays
@deprecate SharedArray{T,N}(::Type{T}, dims::Dims{N}; kwargs...) SharedArray{T,N}(dims; kwargs...)
@deprecate SharedArray{T}(::Type{T}, dims::Int...; kwargs...) SharedArray{T,length(dims)}(dims...; kwargs...)
@deprecate(SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,Int}, offset; kwargs...),
SharedArray{T,N}(filename, dims, offset; kwargs...))
@deprecate(SharedArray{T}(filename::AbstractString, ::Type{T}, dims::NTuple, offset; kwargs...),
SharedArray{T,length(dims)}(filename, dims, offset; kwargs...))

# End deprecations scheduled for 0.6
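In practical terms, these deprecations rewrite call sites roughly as follows (a sketch, assuming Julia 0.6 where the old forms still run but print a deprecation warning):

```julia
# Deprecated form                     # Replacement
x = Array(Float64, 10)                # Array{Float64,1}(10)   (or Vector{Float64}(10))
y = Array(Int, 3, 4)                  # Array{Int,2}(3, 4)     (or Matrix{Int}(3, 4))
z = Array(UInt8, (2, 2, 2))           # Array{UInt8,3}((2, 2, 2))

S = SharedArray(Float64, (100,))      # SharedArray{Float64,1}((100,))
```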
9 changes: 0 additions & 9 deletions base/docs/helpdb/Base.jl
@@ -1676,15 +1676,6 @@ by `show` generally includes Julia-specific formatting and type information.
"""
show(x)

"""
    Array(dims)

`Array{T}(dims)` constructs an uninitialized dense array with element type `T`. `dims` may
be a tuple or a series of integer arguments. The syntax `Array(T, dims)` is also available,
but deprecated.
"""
Array

"""
    issubtype(type1, type2)
2 changes: 1 addition & 1 deletion base/libc.jl
@@ -277,7 +277,7 @@ if is_windows()
const FORMAT_MESSAGE_FROM_SYSTEM = UInt32(0x1000)
const FORMAT_MESSAGE_IGNORE_INSERTS = UInt32(0x200)
const FORMAT_MESSAGE_MAX_WIDTH_MASK = UInt32(0xFF)
lpMsgBuf = Array(Ptr{UInt16})
lpMsgBuf = Array{Ptr{UInt16},0}()
lpMsgBuf[1] = 0
len = ccall(:FormatMessageW,stdcall,UInt32,(Cint, Ptr{Void}, Cint, Cint, Ptr{Ptr{UInt16}}, Cint, Ptr{Void}),
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK,
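The `lpMsgBuf` change above uses a zero-dimensional array; a brief sketch (not part of this diff) of how such an array behaves:

```julia
buf = Array{Ptr{UInt16},0}()   # zero-dimensional: exactly one element, size () and length 1
buf[1] = 0                     # the element can be set via linear indexing, as libc.jl does
buf[]                          # or read/written with no index at all
```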
112 changes: 59 additions & 53 deletions base/sharedarray.jl
@@ -28,19 +28,8 @@ type SharedArray{T,N} <: DenseArray{T,N}
end
end

(::Type{SharedArray{T}}){T,N}(d::NTuple{N,Int}; kwargs...) =
SharedArray(T, d; kwargs...)
(::Type{SharedArray{T}}){T}(d::Integer...; kwargs...) =
SharedArray(T, d; kwargs...)
(::Type{SharedArray{T}}){T}(m::Integer; kwargs...) =
SharedArray(T, m; kwargs...)
(::Type{SharedArray{T}}){T}(m::Integer, n::Integer; kwargs...) =
SharedArray(T, m, n; kwargs...)
(::Type{SharedArray{T}}){T}(m::Integer, n::Integer, o::Integer; kwargs...) =
SharedArray(T, m, n, o; kwargs...)

"""
    SharedArray(T::Type, dims::NTuple; init=false, pids=Int[])
    SharedArray{T,N}(dims::NTuple; init=false, pids=Int[])

Construct a `SharedArray` of a bitstype `T` and size `dims` across the processes specified
by `pids` - all of which have to be on the same host.
@@ -52,8 +41,39 @@ computation with the master process acting as a driver.
If an `init` function of the type `initfn(S::SharedArray)` is specified, it is called on all
the participating workers.
    SharedArray{T,N}(filename::AbstractString, dims::NTuple, [offset=0]; mode=nothing, init=false, pids=Int[])

Construct a `SharedArray` backed by the file `filename`, with element
type `T` (must be a `bitstype`) and size `dims`, across the processes
specified by `pids` - all of which have to be on the same host. This
file is mmapped into the host memory, with the following consequences:
- The array data must be represented in binary format (e.g., an ASCII
format like CSV cannot be supported)
- Any changes you make to the array values (e.g., `A[3] = 0`) will
also change the values on disk
If `pids` is left unspecified, the shared array will be mapped across
all processes on the current host, including the master. But,
`localindexes` and `indexpids` will only refer to worker
processes. This facilitates work distribution code to use workers for
actual computation with the master process acting as a driver.
`mode` must be one of `"r"`, `"r+"`, `"w+"`, or `"a+"`, and defaults
to `"r+"` if the file specified by `filename` already exists, or
`"w+"` if not. If an `init` function of the type
`initfn(S::SharedArray)` is specified, it is called on all the
participating workers. You cannot specify an `init` function if the
file is not writable.
`offset` allows you to skip the specified number of bytes at the
beginning of the file.
"""
function SharedArray{T,N}(::Type{T}, dims::Dims{N}; init=false, pids=Int[])
SharedArray

function (::Type{SharedArray{T,N}}){T,N}(dims::Dims{N}; init=false, pids=Int[])
isbits(T) || throw(ArgumentError("type of SharedArray elements must be bits types, got $(T)"))

pids, onlocalhost = shared_pids(pids)
Expand Down Expand Up @@ -110,39 +130,20 @@ function SharedArray{T,N}(::Type{T}, dims::Dims{N}; init=false, pids=Int[])
S
end

SharedArray(T, I::Int...; kwargs...) = SharedArray(T, I; kwargs...)

"""
    SharedArray(filename::AbstractString, T::Type, dims::NTuple, [offset=0]; mode=nothing, init=false, pids=Int[])

Construct a `SharedArray` backed by the file `filename`, with element
type `T` (must be a `bitstype`) and size `dims`, across the processes
specified by `pids` - all of which have to be on the same host. This
file is mmapped into the host memory, with the following consequences:
- The array data must be represented in binary format (e.g., an ASCII
format like CSV cannot be supported)
- Any changes you make to the array values (e.g., `A[3] = 0`) will
also change the values on disk
If `pids` is left unspecified, the shared array will be mapped across
all processes on the current host, including the master. But,
`localindexes` and `indexpids` will only refer to worker
processes. This facilitates work distribution code to use workers for
actual computation with the master process acting as a driver.
`mode` must be one of `"r"`, `"r+"`, `"w+"`, or `"a+"`, and defaults
to `"r+"` if the file specified by `filename` already exists, or
`"w+"` if not. If an `init` function of the type
`initfn(S::SharedArray)` is specified, it is called on all the
participating workers. You cannot specify an `init` function if the
file is not writable.
(::Type{SharedArray{T,N}}){T,N}(I::Integer...; kwargs...) =
SharedArray{T,N}(I; kwargs...)
(::Type{SharedArray{T}}){T}(d::NTuple; kwargs...) =
SharedArray{T,length(d)}(d; kwargs...)
(::Type{SharedArray{T}}){T}(I::Integer...; kwargs...) =
SharedArray{T,length(I)}(I; kwargs...)
(::Type{SharedArray{T}}){T}(m::Integer; kwargs...) =
SharedArray{T,1}(m; kwargs...)
(::Type{SharedArray{T}}){T}(m::Integer, n::Integer; kwargs...) =
SharedArray{T,2}(m, n; kwargs...)
(::Type{SharedArray{T}}){T}(m::Integer, n::Integer, o::Integer; kwargs...) =
SharedArray{T,3}(m, n, o; kwargs...)

`offset` allows you to skip the specified number of bytes at the
beginning of the file.
"""
function SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,Int},
function (::Type{SharedArray{T,N}}){T,N}(filename::AbstractString, dims::NTuple{N,Int},
offset::Integer=0; mode=nothing, init=false, pids::Vector{Int}=Int[])
if !isabspath(filename)
throw(ArgumentError("$filename is not an absolute path; try abspath(filename)?"))
@@ -208,6 +209,10 @@ function SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,In
S
end

(::Type{SharedArray{T}}){T,N}(filename::AbstractString, dims::NTuple{N,Int}, offset::Integer=0;
mode=nothing, init=false, pids::Vector{Int}=Int[]) =
SharedArray{T,N}(filename, dims, offset; mode=mode, init=init, pids=pids)

function initialize_shared_array(S, onlocalhost, init, pids)
if onlocalhost
init_loc_flds(S)
@@ -246,6 +251,7 @@ typealias SharedMatrix{T} SharedArray{T,2}

length(S::SharedArray) = prod(S.dims)
size(S::SharedArray) = S.dims
ndims(S::SharedArray) = length(S.dims)
linearindexing{S<:SharedArray}(::Type{S}) = LinearFast()

function reshape{T,N}(a::SharedArray{T}, dims::NTuple{N,Int})
@@ -307,21 +313,21 @@ localindexes(S::SharedArray) = S.pidx > 0 ? range_1dim(S, S.pidx) : 1:0
unsafe_convert{T}(::Type{Ptr{T}}, S::SharedArray) = unsafe_convert(Ptr{T}, sdata(S))

function convert(::Type{SharedArray}, A::Array)
S = SharedArray(eltype(A), size(A))
S = SharedArray{eltype(A),ndims(A)}(size(A))
copy!(S, A)
end
function convert{T}(::Type{SharedArray{T}}, A::Array)
S = SharedArray(T, size(A))
S = SharedArray{T,ndims(A)}(size(A))
copy!(S, A)
end
function convert{TS,TA,N}(::Type{SharedArray{TS,N}}, A::Array{TA,N})
S = SharedArray(TS, size(A))
S = SharedArray{TS,ndims(A)}(size(A))
copy!(S, A)
end

function deepcopy_internal(S::SharedArray, stackdict::ObjectIdDict)
haskey(stackdict, S) && return stackdict[S]
R = SharedArray(eltype(S), size(S); pids = S.pids)
R = SharedArray{eltype(S),ndims(S)}(size(S); pids = S.pids)
copy!(sdata(R), sdata(S))
stackdict[S] = R
return R
@@ -468,16 +474,16 @@ end

# convenience constructors
function shmem_fill(v, dims; kwargs...)
SharedArray(typeof(v), dims; init = S->fill!(S.loc_subarr_1d, v), kwargs...)
SharedArray{typeof(v),length(dims)}(dims; init = S->fill!(S.loc_subarr_1d, v), kwargs...)
end
shmem_fill(v, I::Int...; kwargs...) = shmem_fill(v, I; kwargs...)

# rand variant with range
function shmem_rand(TR::Union{DataType, UnitRange}, dims; kwargs...)
if isa(TR, UnitRange)
SharedArray(Int, dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
SharedArray{Int,length(dims)}(dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
else
SharedArray(TR, dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
SharedArray{TR,length(dims)}(dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
end
end
shmem_rand(TR::Union{DataType, UnitRange}, i::Int; kwargs...) = shmem_rand(TR, (i,); kwargs...)
@@ -487,7 +493,7 @@ shmem_rand(dims; kwargs...) = shmem_rand(Float64, dims; kwargs...)
shmem_rand(I::Int...; kwargs...) = shmem_rand(I; kwargs...)

function shmem_randn(dims; kwargs...)
SharedArray(Float64, dims; init = S-> map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
SharedArray{Float64,length(dims)}(dims; init = S-> map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
end
shmem_randn(I::Int...; kwargs...) = shmem_randn(I; kwargs...)

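For reference, a short sketch of the constructor forms this file now exposes, assuming Julia 0.6 with worker processes already added; the path and sizes are illustrative only:

```julia
# Element type and rank as type parameters (replacement for SharedArray(T, dims)).
S = SharedArray{Float64,2}((3, 4); init = S -> S[Base.localindexes(S)] = myid())

# The rank may be inferred when only the element type is given.
v = SharedArray{Int}(10)

# File-backed variant: the element type is a parameter; dims and offset are positional.
F = SharedArray{UInt8,1}("/tmp/shared.bin", (100,), 0; mode="w+")
```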
2 changes: 1 addition & 1 deletion base/sparse/sparsematrix.jl
@@ -1390,7 +1390,7 @@ speye_scaled(diag, m::Integer, n::Integer) = speye_scaled(typeof(diag), diag, m,
function speye_scaled(T, diag, m::Integer, n::Integer)
((m < 0) || (n < 0)) && throw(ArgumentError("invalid array dimensions"))
nnz = min(m,n)
colptr = Array(Int, 1+n)
colptr = Array{Int,1}(1+n)
colptr[1:nnz+1] = 1:nnz+1
colptr[nnz+2:end] = nnz+1
SparseMatrixCSC(Int(m), Int(n), colptr, Vector{Int}(1:nnz), fill!(Vector{T}(nnz), diag))
6 changes: 3 additions & 3 deletions base/sparse/sparsevector.jl
@@ -563,7 +563,7 @@ end

function find{Tv,Ti}(x::SparseVector{Tv,Ti})
numnz = nnz(x)
I = Array(Ti, numnz)
I = Array{Ti,1}(numnz)

nzind = x.nzind
nzval = x.nzval
@@ -587,8 +587,8 @@ end
function findnz{Tv,Ti}(x::SparseVector{Tv,Ti})
numnz = nnz(x)

I = Array(Ti, numnz)
V = Array(Tv, numnz)
I = Array{Ti,1}(numnz)
V = Array{Tv,1}(numnz)

nzind = x.nzind
nzval = x.nzval
6 changes: 0 additions & 6 deletions base/sysimg.jl
@@ -99,12 +99,6 @@ include("subarray.jl")
(::Type{Matrix{T}}){T}(m::Integer, n::Integer) = Matrix{T}(Int(m), Int(n))
(::Type{Matrix})(m::Integer, n::Integer) = Matrix{Any}(Int(m), Int(n))

# TODO: possibly turn these into deprecations
Array{T}(::Type{T}, d::Integer...) = Array(T, convert(Tuple{Vararg{Int}}, d))
Array{T}(::Type{T}, m::Integer) = Array{T,1}(Int(m))
Array{T}(::Type{T}, m::Integer,n::Integer) = Array{T,2}(Int(m),Int(n))
Array{T}(::Type{T}, m::Integer,n::Integer,o::Integer) = Array{T,3}(Int(m),Int(n),Int(o))

# numeric operations
include("hashing.jl")
include("rounding.jl")
14 changes: 7 additions & 7 deletions doc/src/manual/parallel-computing.md
@@ -312,7 +312,7 @@ Parallel for loops like these must be avoided. Fortunately, [Shared Arrays](@ref
to get around this limitation:

```julia
a = SharedArray(Float64,10)
a = SharedArray{Float64}(10)
@parallel for i=1:10
a[i] = i
end
@@ -720,10 +720,10 @@ just returns the object itself, so it's safe to use [`sdata()`](@ref) on any `Ar
The constructor for a shared array is of the form:

```julia
SharedArray(T::Type, dims::NTuple; init=false, pids=Int[])
SharedArray{T,N}(dims::NTuple; init=false, pids=Int[])
```

which creates a shared array of a bits type `T` and size `dims` across the processes specified
which creates an `N`-dimensional shared array of a bits type `T` and size `dims` across the processes specified
by `pids`. Unlike distributed arrays, a shared array is accessible only from those participating
workers specified by the `pids` named argument (and the creating process too, if it is on the
same host).
@@ -741,7 +741,7 @@ julia> addprocs(3)
3
4

julia> S = SharedArray(Int, (3,4), init = S -> S[Base.localindexes(S)] = myid())
julia> S = SharedArray{Int,2}((3,4), init = S -> S[Base.localindexes(S)] = myid())
3×4 SharedArray{Int64,2}:
2 2 3 4
2 3 3 4
@@ -762,7 +762,7 @@ convenient for splitting up tasks among processes. You can, of course, divide th
you wish:

```julia
julia> S = SharedArray(Int, (3,4), init = S -> S[indexpids(S):length(procs(S)):length(S)] = myid())
julia> S = SharedArray{Int,2}((3,4), init = S -> S[indexpids(S):length(procs(S)):length(S)] = myid())
3×4 SharedArray{Int64,2}:
2 2 2 2
3 3 3 3
@@ -861,8 +861,8 @@ end
If we create `SharedArray`s and time these functions, we get the following results (with `julia -p 4`):

```julia
q = SharedArray(Float64, (500,500,500))
u = SharedArray(Float64, (500,500,500))
q = SharedArray{Float64,3}((500,500,500))
u = SharedArray{Float64,3}((500,500,500))

# Run once to JIT-compile
advection_serial!(q, u)
2 changes: 1 addition & 1 deletion src/julia-syntax.scm
@@ -2242,7 +2242,7 @@
,.(map (lambda (v r) `(= ,v (call (top length) ,r))) lengths rv)
(scope-block
(block
(= ,result (call (core Array) ,atype ,@lengths))
(= ,result (call (curly Array ,atype ,(length lengths)) ,@lengths))
(= ,ri 1)
,(construct-loops (reverse ranges) (reverse rv) is states (reverse lengths))
,result)))))
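The lowering tweak above affects typed comprehensions; roughly, in Julia terms, a sketch of the allocation the comprehension expands to, before and after this change:

```julia
x = Float64[i^2 for i in 1:3]

# Allocation used by the old lowering (the deprecated call form):
result_old = Array(Float64, 3)

# Allocation emitted after this change:
result_new = Array{Float64,1}(3)
```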