Commit 23155cf

Merge pull request #47 from FluxML/julia-0.7

Julia 0.7

MikeInnes authored Jun 5, 2018
2 parents 0609258 + f0993bb
Showing 9 changed files with 54 additions and 55 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -4,7 +4,7 @@ os:
- linux
- osx
julia:
- 0.6
- nightly
notifications:
email: false
git:
2 changes: 1 addition & 1 deletion REQUIRE
@@ -1,3 +1,3 @@
julia 0.6
julia 0.7-
Requires
MacroTools
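
Note on this change (not part of the PR): the trailing hyphen in "julia 0.7-" is the REQUIRE convention for a lower bound that also admits pre-release builds, so the package stays installable on 0.7.0-DEV nightlies while 0.7 itself is still unreleased.
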
2 changes: 1 addition & 1 deletion src/NNlib.jl
@@ -1,6 +1,6 @@
module NNlib

using Requires
using Requires, Libdl

export σ, sigmoid, relu, leakyrelu, elu, swish, selu, softplus, softsign, logσ, logsigmoid,
softmax, logsoftmax, maxpool, meanpool
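
Note on this change (not part of the PR): in Julia 0.7 the dynamic-library helpers (dlopen, dlsym, find_library, dlext) moved out of Base into the Libdl standard library, so code that loads shared libraries needs an explicit "using Libdl". A minimal illustration, with a hypothetical library name:

    using Libdl
    Libdl.dlext                                # platform library suffix, e.g. "so", "dylib", or "dll"
    # lib = Libdl.find_library(["libnnlib"])   # hypothetical name; returns "" when nothing is found
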
10 changes: 5 additions & 5 deletions src/conv.jl
@@ -6,7 +6,7 @@ include("impl/conv.jl")
# Convolutions

function cdims(x::NTuple{N}, w::NTuple{N}, pad, stride) where N
ntuple(Val{N}) do i
ntuple(Val(N)) do i
if i < N-1
1 + div(x[i] - w[i] + 2*pad[i], stride[i])
elseif i == N-1
@@ -31,10 +31,10 @@ function conv(x::A, w::A; pad = 0, stride = 1, dilation = 1) where A<:AbstractAr
end

∇conv_data(dy::A, x::A, w::A; pad = 0, stride = 1, dilation = 1) where A<:AbstractArray =
∇conv_data!(zeros(x), dy, x, w; pad = pad, stride = stride, dilation = dilation)
∇conv_data!(zero(x), dy, x, w; pad = pad, stride = stride, dilation = dilation)

∇conv_filter(dy::A, x::A, w::A; pad = 0, stride = 1, dilation = 1) where A<:AbstractArray =
∇conv_filter!(zeros(w), dy, x, w; pad = pad, stride = stride, dilation = dilation)
∇conv_filter!(zero(w), dy, x, w; pad = pad, stride = stride, dilation = dilation)

# N-D dispatch

@@ -88,7 +88,7 @@ conv!(y::AbstractArray{T,5}, x::AbstractArray{T,5}, w::AbstractArray{T,5};
# Pooling

function pdims(dims::Dims{N}, window, padding, stride) where N
ntuple(Val{N}) do i
ntuple(Val(N)) do i
if i < N-1
1 + (dims[i] + 2*padding[i] - window[i])÷stride[i]
else
@@ -97,7 +97,7 @@ function pdims(dims::Dims{N}, window, padding, stride) where N
end
end

expand(::Type{Val{N}}, i::Integer) where N = ntuple(_ -> i, Val{N})
expand(::Type{Val{N}}, i::Integer) where N = ntuple(_ -> i, Val(N))
expand(::Type{Val{N}}, i::NTuple{N, Integer}) where N = i

# Interface
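
Note on these hunks (not part of the PR): two 0.7 deprecations drive the edits above. The compile-time length for ntuple is now passed as the value Val(N) instead of the type Val{N}, and zeros(x::AbstractArray) gives way to zero(x) for "an array of zeros shaped like x". A rough standalone sketch:

    x = rand(Float32, 3, 2)
    ntuple(i -> size(x, i), Val(2))   # (3, 2); the 0.6 spelling was ntuple(f, Val{2})
    zero(x)                           # 3×2 Float32 array of zeros; the 0.6 spelling was zeros(x)
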
4 changes: 2 additions & 2 deletions src/impl/pool.jl
@@ -32,7 +32,7 @@ function max_pooling2d_bwd!(x::AbstractArray{T,4}, y::AbstractArray{T,4},
channels::Int, num::Int, pooled_width::Int, pooled_height::Int, kernel_w::Int,
kernel_h::Int, pad_w::Int, pad_h::Int, stride_w::Int, stride_h::Int) where T

grad_input[:, :, :, :] = 0
grad_input .= 0
#pragma omp parallel for
for n = 1:num, c = 1:channels, ph = 1:pooled_height, pw = 1:pooled_width
hstart = (ph - 1) * stride_h - pad_h
@@ -168,7 +168,7 @@ function max_pooling3d_bwd!(x::AbstractArray{T,5}, y::AbstractArray{T,5},
kernel_w::Int, kernel_h::Int, kernel_d::Int, pad_w::Int, pad_h::Int, pad_d::Int,
stride_w::Int, stride_h::Int, stride_d::Int) where T

grad_input[:, :, :, :, :] = 0
grad_input .= 0

#pragma omp parallel for
for n = 1:num, c = 1:channels, pd = 1:pooled_depth, ph = 1:pooled_height, pw = 1:pooled_width
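
Note on these hunks (not part of the PR): assigning a scalar to a whole-array slice, as in grad_input[:, :, :, :] = 0, was deprecated in 0.7; the idiomatic in-place zero fill is a broadcast assignment (or fill!). A minimal standalone illustration:

    grad = rand(2, 2, 1, 1)
    grad .= 0            # in-place zero fill, equivalent to fill!(grad, 0)
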
18 changes: 9 additions & 9 deletions src/linalg.jl
@@ -1,8 +1,8 @@
## Low level gemm! call with pointers
## Borrowed from Knet.jl

using Base.LinAlg
using Base.LinAlg.BLAS: libblas, BlasInt, @blasfunc
using LinearAlgebra
using LinearAlgebra.BLAS: libblas, BlasInt, @blasfunc

# C := alpha*op(A)*op(B) + beta*C, where:
# op(X) is one of op(X) = X, or op(X) = XT, or op(X) = XH,
@@ -18,13 +18,13 @@ for (gemm, elty) in ((:dgemm_,:Float64), (:sgemm_,:Float32))
if transA=='N'; lda=M; else; lda=K; end
if transB=='N'; ldb=K; else; ldb=N; end
ldc = M;
ccall((@blasfunc($gemm), libblas), Void,
(Ptr{UInt8}, Ptr{UInt8}, Ptr{BlasInt}, Ptr{BlasInt},
Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt},
Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty},
Ptr{BlasInt}),
&transA, &transB, &M, &N, &K,
&alpha, A, &lda, B, &ldb, &beta, C, &ldc)
ccall((@blasfunc(dgemm_), libblas), Nothing,
(Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt},
Ref{BlasInt}, Ref{Float64}, Ptr{Float64}, Ref{BlasInt},
Ptr{Float64}, Ref{BlasInt}, Ref{Float64}, Ptr{Float64},
Ref{BlasInt}),
transA, transB, M, N, K,
alpha, A, lda, B, ldb, beta, C, ldc)
end
end
end
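
Note on this hunk (not part of the PR): three 0.7 renames meet here. Base.LinAlg became the LinearAlgebra standard library, Void became Nothing (with Cvoid for C interop), and the &x ccall syntax was removed, so scalars passed by reference are declared as Ref{T} in the signature and passed directly. A small unrelated sketch of the Ref convention, assuming libm's frexp is visible to the running process:

    exp_ref = Ref{Cint}(0)
    m = ccall(:frexp, Cdouble, (Cdouble, Ref{Cint}), 8.0, exp_ref)
    # m == 0.5 and exp_ref[] == 4, since 8.0 == 0.5 * 2^4
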
28 changes: 14 additions & 14 deletions test/activation.jl
@@ -22,19 +22,19 @@ function test_value_int_input_forces_float64(a)
end
end

if Base.find_in_path("ForwardDiff") nothing
using ForwardDiff
function test_value_duals(a)
@testset "$(a): " begin
for T in [Float32, Float64, Int32, Int64]
val = @inferred a(ForwardDiff.Dual(float(T(1)), one(float(T))))
@test typeof(val) == ForwardDiff.Dual{Void,float(T),1}
end
end
end

test_value_duals.(ACTIVATION_FUNCTIONS)
end
# if Base.find_in_path("ForwardDiff") ≠ nothing
# using ForwardDiff
# function test_value_duals(a)
# @testset "$(a): " begin
# for T in [Float32, Float64, Int32, Int64]
# val = @inferred a(ForwardDiff.Dual(float(T(1)), one(float(T))))
# @test typeof(val) == ForwardDiff.Dual{Nothing,float(T),1}
# end
# end
# end
#
# test_value_duals.(ACTIVATION_FUNCTIONS)
# end

@testset "Activation Functions" begin

@@ -82,7 +82,7 @@ end

xs = rand(5,5)

@test all(sum(softmax(xs), 1) .≈ 1)
@test all(sum(softmax(xs), dims = 1) .≈ 1)

@test sum(softmax(vec(xs))) ≈ 1

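
Note on these hunks (not part of the PR): the ForwardDiff block is commented out because Base.find_in_path no longer exists in 0.7 (Base.find_package is the nearest replacement), and the Dual tag type written there changes from Void to Nothing. The softmax test picks up the other recurring change in this PR: reduction dimensions are now a dims keyword for sum, maximum, and friends. A standalone sketch of the reduction change:

    xs = rand(5, 5)
    colsums = sum(xs, dims = 1)   # 1×5 matrix of column sums; the 0.6 form was sum(xs, 1)
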
32 changes: 16 additions & 16 deletions test/conv.jl
@@ -4,17 +4,17 @@ using NNlib: conv, ∇conv_filter, ∇conv_data, ∇maxpool, maxpool
x = reshape(Float64[1:20;], 5, 4, 1, 1)
w = reshape(Float64[1:4;], 2, 2, 1, 1)

@test squeeze(conv(x, w),(3,4)) == [
@test squeeze(conv(x, w), dims = (3,4)) == [
29 79 129;
39 89 139;
49 99 149;
59 109 159.]

@test squeeze(conv(x, w; stride=2),(3,4)) == [
@test squeeze(conv(x, w; stride=2), dims = (3,4)) == [
29 129;
49 149.]

@test squeeze(conv(x, w; pad=1),(3,4)) == [
@test squeeze(conv(x, w; pad=1), dims = (3,4)) == [
1.0 9.0 29.0 49.0 48.0;
4.0 29.0 79.0 129.0 115.0;
7.0 39.0 89.0 139.0 122.0;
@@ -23,15 +23,15 @@ using NNlib: conv, ∇conv_filter, ∇conv_data, ∇maxpool, maxpool
10.0 40.0 70.0 100.0 80.0
]

@test squeeze(conv(x, w; dilation=2),(3,4)) == [
@test squeeze(conv(x, w; dilation=2), dims = (3,4)) == [
48 98;
58 108;
68 118.]

# NaN tests for dilation forward pass

ys = []
for idx in 1:1000
for idx in 1:1000
push!(ys, conv(x, w; dilation=2))
end
@test !any([any(isnan.(ys[idx])) for idx in 1:1000])
@@ -72,9 +72,9 @@ end

x = reshape(Float64[1:20;], 5, 4, 1, 1)

@test squeeze(maxpool(x, (2,2)), (3,4)) == [7 17; 9 19]
@test squeeze(maxpool(x, (2,2); stride=(2,2)), (3,4)) == [7 17; 9 19]
@test squeeze(maxpool(x, (2,2); pad=(1,1)), (3,4)) == [
@test squeeze(maxpool(x, (2,2)), dims = (3,4)) == [7 17; 9 19]
@test squeeze(maxpool(x, (2,2); stride=(2,2)), dims = (3,4)) == [7 17; 9 19]
@test squeeze(maxpool(x, (2,2); pad=(1,1)), dims = (3,4)) == [
1.0 11.0 16.0;
3.0 13.0 18.0;
5.0 15.0 20.0;
@@ -106,9 +106,9 @@ end
1078.0 1258.0 1438.0;
1114.0 1294.0 1474.0;
1150.0 1330.0 1510.0]
@test squeeze(conv(x, w),(4,5)) == res
@test squeeze(conv(x, w), dims = (4,5)) == res

@test squeeze(conv(x, w; stride=2),(3,4,5)) == [
@test squeeze(conv(x, w; stride=2), dims = (3,4,5)) == [
322.0 682.0;
394.0 754.0]

@@ -141,9 +141,9 @@ end
478.0 1185.0 1315.0 1445.0 877.0;
489.0 1211.0 1341.0 1471.0 892.0;
270.0 660.0 730.0 800.0 480.0]
@test squeeze(conv(x, w; pad=1),(4,5)) == res
@test squeeze(conv(x, w; pad=1), dims = (4,5)) == res

@test squeeze(conv(x, w; dilation=2),(3,4,5)) == [
@test squeeze(conv(x, w; dilation=2), dims = (3,4,5)) == [
608 788;
644 824;
680 860.
@@ -152,7 +152,7 @@ end
# NaN tests for dilation forward pass

ys = []
for idx in 1:1000
for idx in 1:1000
push!(ys, conv(x, w; dilation=2))
end
@test !any([any(isnan.(ys[idx])) for idx in 1:1000])
@@ -187,8 +187,8 @@ end

x = reshape(Float64[1:60;], 5, 4, 3, 1, 1)

@test squeeze(maxpool(x, (2,2,2)), (3,4,5)) == [27 37; 29 39.]
@test squeeze(maxpool(x, (2,2,2); stride=(2,2,2)), (3,4,5)) == [27 37; 29 39.]
@test squeeze(maxpool(x, (2,2,2)), dims = (3,4,5)) == [27 37; 29 39.]
@test squeeze(maxpool(x, (2,2,2); stride=(2,2,2)), dims = (3,4,5)) == [27 37; 29 39.]
res = zeros(3,3,2)
res[:, :, 1] = [
1.0 11.0 16.0;
@@ -198,7 +198,7 @@ end
41.0 51.0 56.0;
43.0 53.0 58.0;
45.0 55.0 60.0]
@test squeeze(maxpool(x, (2,2,2), pad=(1,1,1)), (4,5)) == res
@test squeeze(maxpool(x, (2,2,2), pad=(1,1,1)), dims = (4,5)) == res

# for gradients, check only size
# correctness of gradients is cross-checked with CUDNN.jl
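
Note on these hunks (not part of the PR): every squeeze edit above is the same 0.7 deprecation, with the dimensions to drop now given as a dims keyword; on Julia 1.0 and later the function is renamed dropdims. A small standalone example:

    x = reshape(collect(1.0:4.0), 2, 2, 1, 1)
    squeeze(x, dims = (3, 4))     # 2×2 matrix; the 0.6 form was squeeze(x, (3, 4))
    # On Julia ≥ 1.0: dropdims(x, dims = (3, 4))
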
11 changes: 5 additions & 6 deletions test/runtests.jl
@@ -1,13 +1,12 @@
using NNlib
using Base.Test
using NNlib, Test

@testset "NNlib" begin

include("activation.jl")
include("conv.jl")
if Base.find_in_path("CuArrays") nothing
include("cubroadcast.jl")
end
# if Base.find_in_path("CuArrays") ≠ nothing
# include("cubroadcast.jl")
# end

xs = [-100_000, -100_000.]
@test softmax(xs) ≈ [0.5, 0.5]
@@ -19,7 +18,7 @@ xs = rand(5)
@test logsigmoid.(xs) ≈ log.(sigmoid.(xs))

xs = rand(5,10)
@test softmax(xs) ≈ exp.(xs) ./ sum(exp.(xs),1)
@test softmax(xs) ≈ exp.(xs) ./ sum(exp.(xs), dims = 1)
@test logsoftmax(xs) ≈ log.(softmax(xs))
@test logsigmoid.(xs) ≈ log.(sigmoid.(xs))

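
Note on these hunks (not part of the PR): the test framework moved from Base.Test to the Test standard library in 0.7, so "using Base.Test" becomes "using Test" (here folded into the NNlib line), and the find_in_path guard around the CuArrays tests is commented out for the same reason as in activation.jl. A minimal sketch of the new test setup:

    using Test
    @testset "smoke test" begin
        @test 1 + 1 == 2
    end
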
