
Merge pull request #30 from Clonkk/devel
Devel
Clonkk authored Aug 16, 2021
2 parents d729076 + af6f7c1 commit 3f387b3
Showing 14 changed files with 204 additions and 50 deletions.
9 changes: 8 additions & 1 deletion changelog.md
@@ -1,6 +1,13 @@
Changelog for Nimjl. Dates are in YYYY_MM_DD format.

Release v0.5.6 - 2021_07_XX
Release v0.5.7 - 2021_08_16
===========================
* Added dot operators and broadcast mechanism.
* Added JlDataType conversions to typedesc
* Added ``rand`` proc to initialize Julia Array
* Updated tests

Release v0.5.6 - 2021_07_23
===========================
* Julia.Exit() now made optional
* Fixed indexing bugs. Factorized some code.
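
Taken together, the new v0.5.7 entry points sketch out like this (my illustration, not part of the commit; it assumes a running Julia instance via ``Julia.init()``):

```nim
import nimjl

Julia.init()

var a = @[1, 2, 3].toJlArray()     # Nim seq copied into a Julia Array{Int64}
let b = a .+ 1.0                   # new dot operator: Int array .+ Float scalar -> Float array
echo eltype(b) == jlType(float)    # true
# Julia.Exit() is optional since v0.5.6
```
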
2 changes: 1 addition & 1 deletion nimjl.nimble
@@ -1,6 +1,6 @@
# Nimjl
# Licensed and distributed under MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
version = "0.5.6"
version = "0.5.7"
author = "Regis Caillaud"
description = "Nim Julia bridge"
license = "MIT"
6 changes: 4 additions & 2 deletions nimjl/arrays.nim
@@ -46,7 +46,7 @@ proc shape*[T](x: JlArray[T]): seq[int] =
result.add x.dim(i)

proc eltype*[T](x: JlArray[T]): JlDataType =
jl_array_eltype(x)
jl_array_eltype(cast[JlValue](x))
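# Julia's C API declares jl_array_eltype as taking a jl_value_t*, hence the cast
# (see the matching signature change in nimjl/private/jlarrays.nim below)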

# Buffer with dims
proc jlArrayFromBuffer*[T](data: ptr UncheckedArray[T], dims: openArray[int]): JlArray[T] =
@@ -84,9 +84,11 @@ proc toJlArray*[T](x: Tensor[T]): JlArray[T] =
result = allocJlArray[T](shape)
copyMem(unsafeAddr(result.getRawData()[0]), unsafeAddr(toUnsafeView(x)[0]), nbytes)


import ./arrays/interop
export interop

import ./arrays/indexing
export indexing

import ./arrays/dotoperators
export dotoperators
65 changes: 65 additions & 0 deletions nimjl/arrays/dotoperators.nim
@@ -0,0 +1,65 @@
import ../types
import ../functions
import ../conversions
import ../glucose

import ./interop

import std/macros

# Array operators
# Comparison
proc equal*[T](val1, val2: JlArray[T]): bool =
jlCall("==", val1, val2).to(bool)

template `==`*[T](val1, val2: JlArray[T]): bool =
val1.equal(val2)

# Dot operators
# Broadcast helpers
template jlBroadcast*[T](f: JlFunc, arr: JlArray[T], args: varargs[untyped]): untyped =
jlCall("broadcast", f, arr, args)

template jlBroadcast*[T](f: string, arr: JlArray[T], args: varargs[untyped]): untyped =
jlCall("broadcast", getJlFunc(f), arr, args)

macro jlBroadcast*(f: untyped, args: varargs[untyped]): untyped =
var expr = f.toStrLit.strVal
quote:
let f = getJlFunc(`expr`)
jlCall("broadcast", f, `args`)

proc `.+`*[T: SomeInteger, U: SomeInteger](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`+`, val, factor).toJlArray(T)

proc `.+`*[T: SomeInteger, U: SomeFloat](val: JlArray[T], factor: U|JlArray[U]): JlArray[U] =
jlBroadcast(`+`, val, factor).toJlArray(U)

proc `.+`*[T: SomeFloat, U: SomeNumber](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`+`, val, factor).toJlArray(T)

proc `.-`*[T: SomeInteger, U: SomeInteger](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`-`, val, factor).toJlArray(T)

proc `.-`*[T: SomeInteger, U: SomeFloat](val: JlArray[T], factor: U|JlArray[U]): JlArray[U] =
jlBroadcast(`-`, val, factor).toJlArray(U)

proc `.-`*[T: SomeFloat, U: SomeNumber](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`-`, val, factor).toJlArray(T)

proc `.*`*[T: SomeInteger, U: SomeInteger](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`*`, val, factor).toJlArray(T)

proc `.*`*[T: SomeInteger, U: SomeFloat](val: JlArray[T], factor: U|JlArray[U]): JlArray[U] =
jlBroadcast(`*`, val, factor).toJlArray(U)

proc `.*`*[T: SomeFloat, U: SomeNumber](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`*`, val, factor).toJlArray(T)

# ./ is a special case: Julia's / on integers returns Float64, hence JlArray[float]
proc `./`*[T: SomeInteger, U: SomeNumber](val: JlArray[T], factor: U|JlArray[U]): JlArray[float] =
jlBroadcast(`/`, val, factor).toJlArray(float)

proc `./`*[T: SomeFloat, U: SomeNumber](val: JlArray[T], factor: U|JlArray[U]): JlArray[T] =
jlBroadcast(`/`, val, factor).toJlArray(T)
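
As a hedged usage sketch (my addition, not from the commit): the three ``jlBroadcast`` overloads all end up calling Julia's ``broadcast``; ``+`` below is the standard Julia function.

```nim
import nimjl

Julia.init()

var x = @[1.0, 4.0, 9.0].toJlArray()
# string overload: the Julia function is resolved by name
let r1 = jlBroadcast("+", x, 1.0).toJlArray(float)
# macro overload: the identifier is stringified, then resolved
let r2 = jlBroadcast(`+`, x, 1.0).toJlArray(float)
echo r1 == r2                      # both evaluate broadcast(+, x, 1.0) in Julia
```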

8 changes: 7 additions & 1 deletion nimjl/arrays/interop.nim
@@ -120,7 +120,13 @@ proc fill*[T](x: T, dims: varargs[int]): JlArray[T] =
else:
result = jlCall("fill", x).toJlArray(T)

# TODO map typedesc to Julia type
proc rand*[T](dims: openArray[int]): JlArray[T] =
  # Map T to its Julia datatype by round-tripping a default value through JlValue
  var tmp: T
  let tmp2 = tmp.toJlValue()
  jlCall("rand", jltypeof(tmp2), dims).toJlArray(T)

proc asType*[T](x: JlArray[T], U: typedesc): JlArray[U] =
let tmp = newSeq[U](1).toJlArray()
result = jlCall("convert", jltypeof(tmp), x).toJlArray(U)
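
A hedged sketch of the new ``rand`` next to the existing ``asType`` (my addition; how ``dims`` is converted on the Julia side is an assumption here):

```nim
import nimjl

Julia.init()

# allocate a 2x3 Array{Float64} of uniform random values on the Julia side
var r = rand[float64]([2, 3])
echo r.shape()                                       # @[2, 3]

# widen an integer array to Float64 through Julia's convert
var a = @[1, 2, 3].toJlArray()
echo a.asType(float64).eltype() == jlType(float64)   # true
```
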
3 changes: 3 additions & 0 deletions nimjl/conversions.nim
@@ -7,6 +7,8 @@ import conversions/box
import conversions/dict_tuples
import conversions/obj_structs

import conversions/jldatatype

export fromjl
export unbox

@@ -16,3 +18,4 @@ export box
export dict_tuples
export obj_structs

export jldatatype
1 change: 0 additions & 1 deletion nimjl/conversions/box.nim
@@ -37,4 +37,3 @@ proc julia_box(x: pointer): JlValue = jl_box_voidpointer(x)
proc jlBox*[T](val: T): JlValue =
julia_box(val)


33 changes: 33 additions & 0 deletions nimjl/conversions/jldatatype.nim
@@ -0,0 +1,33 @@
import ../types
import std/strformat

proc to*(t: JlDataType) : typedesc =
case t
of jlType(char):
return typedesc[char]

of jlType(int8):
return typedesc[int8]
of jlType(int16):
return typedesc[int16]
of jlType(int32):
return typedesc[int32]
of jlType(int64):
return typedesc[int64]

of jlType(uint8):
return typedesc[uint8]
of jlType(uint16):
return typedesc[uint16]
of jlType(uint32):
return typedesc[uint32]
of jlType(uint64):
return typedesc[uint64]

of jlType(float32):
return typedesc[float32]
of jlType(float64):
return typedesc[float64]

else:
raise newException(JlError, &"Type conversion from Julia to Nim not supported for type {t}")
2 changes: 1 addition & 1 deletion nimjl/private/jlarrays.nim
@@ -24,7 +24,7 @@ proc jl_alloc_array_3d*(atype: ptr jl_value, nr: csize_t, nc: csize_t, z: csize_
## Handle apply Array type mechanics
proc jl_apply_array_type(x: ptr jl_value, ndims: csize_t): ptr jl_value {.importc: "jl_apply_array_type".}

proc jl_array_eltype*(x: ptr jl_array): ptr jl_datatype {.importc: "jl_array_eltype".}
proc jl_array_eltype*(x: ptr jl_value): ptr jl_datatype {.importc: "jl_array_eltype".}

{.pop.}

26 changes: 12 additions & 14 deletions nimjl/sugar/operators.nim
@@ -48,24 +48,22 @@ proc equal*(val1, val2: JlValue): bool =
template `==`*(val1, val2: JlValue): bool =
val1.equal(val2)

proc equal*[T](val1, val2: JlArray[T]): bool =
jlCall("==", val1, val2).to(bool)

# # Comparison
template `==`*[T](val1, val2: JlArray[T]): bool =
val1.equal(val2)


proc `!=`*(val1, val2: JlValue): bool =
Julia.`!=`(val1, val2).to(bool)

proc `!==`*(val1, val2: JlValue): bool =
Julia.`!==`(val1, val2).to(bool)

# Assignment
# TODO
# +=, -=, /=, *=
#
# Dot operators
# TODO
# ., .*, ./, .+, .- etc..
proc `+=`*(val1: var JlValue, val2: JlValue) =
discard Julia.`+=`(val1, val2)

proc `-=`*(val1: var JlValue, val2: JlValue) =
discard Julia.`-=`(val1, val2)

proc `*=`*(val1: var JlValue, val2: JlValue) =
discard Julia.`*=`(val1, val2)

proc `/=`*(val1: var JlValue, val2: JlValue) =
discard Julia.`/=`(val1, val2)
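
Intended usage, as I read this diff, is sketched below (hedged: in Julia ``+=`` is syntax rather than a named function, so this assumes nimjl's ``Julia.`` lookup resolves it):

```nim
import nimjl

Julia.init()

var x = toJlValue(10)
x += toJlValue(5)     # dispatches through Julia.`+=` as defined above
echo x.to(int)        # expected: 15
```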

1 change: 1 addition & 0 deletions nimjl/types.nim
@@ -67,3 +67,4 @@ template jlType*(T: typedesc[bool]): JlDataType = JlBool
template jlType*(T: typedesc[char]): JlDataType = JlChar
template jlType*(T: typedesc[float32]): JlDataType = JlFloat32
template jlType*(T: typedesc[float64]): JlDataType = JlFloat64

39 changes: 11 additions & 28 deletions readme.md
@@ -2,8 +2,6 @@

![workflow](https://github.com/Clonkk/nimjl/actions/workflows/ci.yml/badge.svg)

This repo is a WIP to call Julia functions from Nim using Julia's C-API.

## Prerequisite

* Install Julia version 1.5.3 or above
@@ -14,7 +12,7 @@ This repo is a WIP to call Julia functions from Nim using the C-API

After these steps, ``$JULIA_PATH/include`` should point to the Julia headers and ``$JULIA_PATH/lib`` should point to ``libjulia.so``

You can also install Julia locally by running ``nimble installjulia``, in that case it will install Julia in the ``third_party`` folder
You can also install Julia locally by running ``nimble install julia``, in that case it will install Julia in the ``third_party`` folder

## Resources

@@ -39,42 +37,34 @@ Julia is mostly oriented towards numerical computing so Arrays are THE most impo
Mostly quality-of-life improvements, especially when handling arrays.

* Improve Julia Arrays interop. from Nim.
* Create Array API with most common proc
* Implement Matrix calc. operators : `*`, `+`, `-`, `/`, "Dotted operator" ``*.``, ``+.``, ``-.``, ``/.``
* Supports complex Arrays

* Array constructor API with most common proc
* Supports complex Arrays
* map / apply / reduce / fold

* Map mutable struct to Nim object

### Backlog

* Add support for Enum types

* GPU Support (or is CUDA.jl enough?)

* Support Julia chaining syntax

* Add support for Enum types
* Add a tag for tracing Julia memory allocations

## Limitations

* Value conversions Nim ==> Julia are done **by copy**.
* Value conversions Nim -> Julia are done **by copy**.
* Arrays are an exception to this rule: they can be created from a buffer and accessed through a buffer.
* Value conversions Julia => Nim are always done **by copy**.

* Value conversions Julia -> Nim are always done **by copy**.
* When using Arrays, you can access the Julia Array's buffer as a ``ptr UncheckedArray`` with ``rawData()``.
* Using ``to(seq[T])`` or ``to(Tensor[T])`` performs a ``copyMem`` of ``jlArray.rawData()`` into your seq/Tensor (see the sketch after this list).

* Julia-allocated arrays only go up to 3 dimensions (but Arrays can be allocated in Nim)

* Linux / WSL support only
* So far, I haven't taken the time to create dedicated linking syntax for Windows. Otherwise, most of it should work.
* PRs aiming at improving Windows support are still accepted.
* You can uncomment the Windows part of ``.github/workflows/ci.yml`` to run the CI on a fork.
* Windows dynamic library linking is different from Linux's.
* If you need Windows support, consider opening an issue or a PR :).
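
A sketch of the two access paths (my addition; it assumes nimjl is built with arraymancer support and that ``rawData()`` is the accessor named above):

```nim
import nimjl
import arraymancer

Julia.init()

var t = [[1.0, 2.0], [3.0, 4.0]].toTensor
var a = t.toJlArray()          # copies the Tensor's buffer into a Julia Array

# zero-copy: view the Julia buffer directly
let buf = a.rawData()          # ptr UncheckedArray[float]
echo buf[0]                    # 1.0

# by copy: copyMem back into a Tensor
let t2 = a.to(Tensor[float])
echo t2.shape                  # [2, 2]
```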

# Examples

Here is the basic API usage:
Here is the basic example:
```nim
import nimjl
@@ -87,15 +77,8 @@ echo res # 2.0
```

Take a look at the ``examples/`` folder for more examples.

# Documentation

Complete API documentation remains a TODO.

# Contributions
Take a look at the ``examples/`` folder for more examples. There's also the ``tests/`` folder with more applications.

All contributions are welcome!

# License

54 changes: 54 additions & 0 deletions tests/arraymancertensortest.nim
@@ -92,6 +92,56 @@ proc tensorBuiltinRot180() =
orig_tensor.apply_inline: 11.0 - x
check tensorResData == orig_tensor


proc tensorDotOperator() =
block:
var
origTensor = [[1, 2, 3], [4, 5, 6], [7, 8, 9]].toTensor
origJlArray = toJlArray(origTensor)
res = origJlArray .+ 3
origTensor.apply_inline: x+3
check eltype(res) == jlType(int)
check res == toJlArray(origTensor)

block:
var
origTensor = [[1, 2, 3], [4, 5, 6], [7, 8, 9]].toTensor
origJlArray = toJlArray(origTensor)
res = origJlArray .+ 3.0
var t2 = origTensor.asType(float)
t2.apply_inline: x+3.0
check eltype(res) == jlType(float)
check res == toJlArray(t2)

block:
var
origTensor = toTensor([[1.0'f64, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
origJlArray = toJlArray(origTensor)
res = origJlArray .+ 3
origTensor.apply_inline: x+3.0
check eltype(res) == jlType(float)
check res == toJlArray(origTensor)

block:
var
origTensor = toTensor([[2.0'f64, 2.0, 2.0], [4.0, 4.0, 4.0], [8.0, 8.0, 8.0]])
origJlArray = toJlArray(origTensor)
res = origJlArray ./ 2
origTensor.apply_inline: x/2.0
check eltype(res) == jlType(float)
check res == toJlArray(origTensor)

block:
var
origTensor = toTensor([[1.0'f64, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
origJlArray = toJlArray(origTensor)
res = jlBroadcast("addValue", origJlArray, 6).toJlArray(float)
origTensor.apply_inline: x+6.0
check eltype(res) == jlType(float)
check res == toJlArray(origTensor)



proc runTensorArgsTest*() =
suite "Tensor":
teardown: jlGcCollect()
@@ -105,6 +155,10 @@ proc runTensorArgsTest*() =
test "rot180[Tensor]":
tensorBuiltinRot180()

test "Dot Operators":
tensorDotOperator()


when isMainModule:
import ./testfull
Julia.init()
