diff --git a/README.md b/README.md
index 01b6d47..1760c19 100644
--- a/README.md
+++ b/README.md
@@ -245,7 +245,7 @@ using CUDArt
 export function1
 
 const ptxdict = Dict()
-const mdlist = Array(CuModule, 0)
+const mdlist = Array{CuModule}(0)
 
 function mdinit(devlist)
     global ptxdict
@@ -317,7 +317,7 @@ demonstration that activates processing on multiple devices:
 ```
 measured_sleep_time = CUDArt.devices(dev->true, nmax=2) do devlist
     sleeptime = 0.5
-    results = Array(Float64, 3*length(devlist))
+    results = Array{Float64}(3*length(devlist))
     streams = [(device(dev); Stream()) for dev in devlist]
     # Force one run to precompile
     cudasleep(sleeptime; dev=devlist[1], stream=streams[1])
diff --git a/src/arrays.jl b/src/arrays.jl
index 7dcae34..e52f82f 100644
--- a/src/arrays.jl
+++ b/src/arrays.jl
@@ -86,7 +86,7 @@ device(A::AbstractArray) = -1 # for host
 
 pointer(g::AbstractCudaArray) = g.ptr
 
-to_host{T}(g::AbstractCudaArray{T}) = copy!(Array(T, size(g)), g)
+to_host{T}(g::AbstractCudaArray{T}) = copy!(Array{T}(size(g)), g)
 
 summary(g::AbstractCudaArray) = string(g)
 
@@ -160,7 +160,7 @@ function _copy!{T}(dst::ContiguousArray{T}, src::ContiguousArray{T}, stream)
     return dst
 end
 _copy!{T}(dst::ContiguousArray{T}, src::ContiguousArray, stream) = _copy!(dst, to_eltype(T, src), stream)
-_copy!{T}(dst::AbstractCudaArray{T}, src, stream) = _copy!(dst, copy!(Array(T, size(src)), src), stream)
+_copy!{T}(dst::AbstractCudaArray{T}, src, stream) = _copy!(dst, copy!(Array{T}(size(src)), src), stream)
 
 function fill!{T}(X::CudaArray{T}, val; stream=null_stream)
     valT = convert(T, val)
@@ -181,7 +181,7 @@ CudaPitchedArray(T::Type, dims::Integer...) = CudaPitchedArray(T, dims)
 function CudaPitchedArray(T::Type, dims::Dims)
     nd = length(dims)
     1 <= nd <= 3 || error("Supports only dimensions 1, 2, or 3")
-    p = Array(rt.cudaPitchedPtr, 1)
+    p = Array{rt.cudaPitchedPtr}(1)
     ext = CudaExtent(T, dims)
     rt.cudaMalloc3D(p, ext)
     pp = p[1]
@@ -402,7 +402,7 @@ function free(ha::HostArray)
     if ha.ptr != C_NULL && haskey(cuda_ptrs, ha.ptr)
         rt.cudaFreeHost(ha.ptr)
         ha.ptr = C_NULL
-        ha.data = Array(eltype(ha), ntuple(d->0, ndims(ha)))
+        ha.data = Array{eltype(ha)}(ntuple(d->0, ndims(ha)))
     end
 end
 
diff --git a/src/device.jl b/src/device.jl
index 0504eb7..dbaf658 100644
--- a/src/device.jl
+++ b/src/device.jl
@@ -27,7 +27,7 @@ end
 
 device_synchronize() = rt.cudaDeviceSynchronize()
 
-device_properties(dev::Integer) = (aprop = Array(rt.cudaDeviceProp, 1); rt.cudaGetDeviceProperties(aprop, dev); aprop[1])
+device_properties(dev::Integer) = (aprop = Array{rt.cudaDeviceProp}(1); rt.cudaGetDeviceProperties(aprop, dev); aprop[1])
 
 attribute(dev::Integer, code::Integer) = (ret = Cint[0]; rt.cudaDeviceGetAttribute(ret, code, dev); Int(ret[1]))
 
diff --git a/src/stream.jl b/src/stream.jl
index d18ce83..8d778e1 100644
--- a/src/stream.jl
+++ b/src/stream.jl
@@ -11,7 +11,7 @@ type Stream <: AbstractStream
     c::Condition
 end
 function Stream()
-    p = Array(Ptr{Void}, 1)
+    p = Array{Ptr{Void}}(1)
     rt.cudaStreamCreate(p)
     hnd = CuStream(p[1])
     Stream(hnd, Condition())
diff --git a/test/test.jl b/test/test.jl
index 53f104b..23bd45d 100644
--- a/test/test.jl
+++ b/test/test.jl
@@ -163,7 +163,7 @@ gc() # check for finalizer errors
 if CUDArt.devcount() > 1
     CUDArt.devices(dev->true, nmax=2) do devlist
         sleeptime = 0.5
-        results = Array(Any, ceil(Int, 2.5*length(devlist)))
+        results = Array{Any}(ceil(Int, 2.5*length(devlist)))
         streams = [(CUDArt.device(dev); CUDArt.Stream()) for dev in devlist]
         # Force one run to precompile
         CUDArt.cudasleep(sleeptime; dev=devlist[1], stream=streams[1])