Saturday, May 16, 2015

make a love/ransom note from magazine(s)

// 1 magazine
(ransom, magazine):
    map = new //word->count of unused magazine words
    foreach r in ransom:
        while !map.has(r) || map[r] == 0:
            if !magazine.hasnext():
                return false //magazine exhausted before the note is complete
            m = magazine.next()
            if !map.has(m):
                map[m] = 1
            else:
                map[m]++
        map[r]--
    return true

// 1+ magazines
(ransom, magazines):
    map = new //word->count of unused magazine words
    magazine = magazines.next()
    foreach r in ransom:
        while !map.has(r) || map[r] == 0:
            while !magazine.hasnext(): //while to handle magazine with 0 words
                if !magazines.hasnext():
                    return false //all magazines exhausted before the note is complete
                magazine = magazines.next()
            m = magazine.next()
            if !map.has(m):
                map[m] = 1
            else:
                map[m]++
        map[r]--
    return true
Alternative: build a word->count map of the ransom note up front and tick words off as they are found.
// 1+ magazines
(ransom, magazines):
    rmap = ransom.groupby(x => x).tomap(g => g.key, g => g.count) //word->count map
    magazine = magazines.next()
    while rmap.keys.count > 0:
        while !magazine.hasnext(): // while to handle magazine with 0 words
            if !magazines.hasnext():
                return false //ransom words remain but magazines are exhausted
            magazine = magazines.next()
        m = magazine.next()
        if rmap.has(m):
            rmap[m]--
            if rmap[m] == 0:
                rmap.remove(m)
    return true //rmap is empty, every ransom word was found
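Roughly the same map-based idea as a Java sketch, assuming the magazine is exposed as an Iterator of words (class and method names here are my own, not from the post):
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class RansomNote {
    // Returns true if every word of the ransom note can be cut out of the magazine.
    // Magazine words are consumed lazily; counts of unused words are cached in a map.
    static boolean canBuild(List<String> ransom, Iterator<String> magazineWords) {
        Map<String, Integer> available = new HashMap<>();
        for (String word : ransom) {
            // Pull magazine words until the needed word is available.
            while (available.getOrDefault(word, 0) == 0) {
                if (!magazineWords.hasNext()) {
                    return false; // magazine exhausted before the note is complete
                }
                available.merge(magazineWords.next(), 1, Integer::sum);
            }
            available.merge(word, -1, Integer::sum); // use one occurrence
        }
        return true;
    }

    public static void main(String[] args) {
        List<String> ransom = List.of("send", "money", "now");
        List<String> magazine = List.of("now", "is", "the", "time", "to", "send", "money");
        System.out.println(canBuild(ransom, magazine.iterator())); // true
    }
}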

[Hat tip to GL]

Wednesday, May 13, 2015

lru cache for n items

lru cache for n items with O(1) add, remove, has, get.

With a plain queue and delayed removal, add() is not O(1) in the worst case (stale keys must be skipped during eviction) and get() is difficult/hacky to implement (the accessed key cannot be moved to the back). So use a deque (a doubly linked list) that hands out node references.
// caches n recently used items
lru-cache:
    n // items
    ctor(n):
        n = n
    count = 0
    map, q // queue variant: key->value map, queue with delayed removal
    mapkv, dq, mapkn // deque variant: mapkv is key->value, mapkn is key->node
    add(key, value):
        if map.has(key):
            return
        if count == n:
            // remove lru item
            // handle delayed removal from queue
            do:
                lrukey = q.pop()
            while !map.has(lrukey)
            map.remove(lrukey)
            count--
        // add key
        count++
        q.push(key)
        map.add(key, value)
    add(key, value):
        if map.has(key):
            return
        if count == n:
            // remove lru item
            count--
            lrukey = dq.pop()
            mapkn.remove(lrukey)
            mapkv.remove(lrukey)
        // add key
        count++
        node = dq.push(key)
        mapkn.add(key, node)
        mapkv.add(key, value)
    remove(key):
        if !map.has(key):
            return // or throw "error"
        count--
        // only remove from map, delay removal from queue during add
        map.remove(key)
    remove(key):
        if !map.has(key):
            return // or throw "error"
        count--
        node = mapkn[key]
        dq.remove(node)
        mapkn.remove(key)
        mapkv.remove(key)
    has(key):
        return map.has(key)
    has(key):
        return mapkv.has(key)
    get(key):
        // difficult/hacky to implement
    get(key):
        if !mapkv.has(key):
            return null
        // move node to end of dq; mapkv, mapkn do not change
        node = mapkn[key]
        dq.push(node)
        return mapkv[key]
    count():
        return count
    foreach():
        foreach key in q:
            if map.has(key): // skip stale keys left by delayed removal
                yield map[key]
    foreach():
        foreach key in dq:
            yield mapkv[key]
    
// deque interface
dq:
    // push(key) and remove(node) are used for removal in O(1) time
    node push(key):
    key pop():
    remove(node):
    push(node): // move existing node to end
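A minimal Java sketch of the deque variant, with a hand-rolled doubly linked list standing in for the dq interface above (class and field names are my own):
import java.util.HashMap;
import java.util.Map;

// Caches the n most recently used items: O(1) add, remove, has, get.
// A HashMap gives key -> node; each node carries the value and sits on a doubly
// linked list ordered from least to most recently used.
public class LruCache<K, V> {
    private static final class Node<K, V> {
        K key; V value; Node<K, V> prev, next;
        Node(K key, V value) { this.key = key; this.value = value; }
    }

    private final int capacity;
    private final Map<K, Node<K, V>> nodes = new HashMap<>();
    private final Node<K, V> head = new Node<>(null, null); // sentinel: LRU side
    private final Node<K, V> tail = new Node<>(null, null); // sentinel: MRU side

    public LruCache(int capacity) {
        this.capacity = capacity;
        head.next = tail;
        tail.prev = head;
    }

    public void add(K key, V value) {
        if (nodes.containsKey(key)) return;       // mirror the pseudocode: ignore duplicate adds
        if (nodes.size() == capacity) {
            Node<K, V> lru = head.next;           // evict the least recently used item
            unlink(lru);
            nodes.remove(lru.key);
        }
        Node<K, V> node = new Node<>(key, value);
        linkAtTail(node);
        nodes.put(key, node);
    }

    public void remove(K key) {
        Node<K, V> node = nodes.remove(key);
        if (node != null) unlink(node);
    }

    public boolean has(K key) { return nodes.containsKey(key); }

    public V get(K key) {
        Node<K, V> node = nodes.get(key);
        if (node == null) return null;
        unlink(node);          // move to the MRU end; the map is unchanged
        linkAtTail(node);
        return node.value;
    }

    private void unlink(Node<K, V> node) {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }

    private void linkAtTail(Node<K, V> node) {
        node.prev = tail.prev;
        node.next = tail;
        tail.prev.next = node;
        tail.prev = node;
    }
}
For what it's worth, Java's LinkedHashMap with accessOrder = true (plus removeEldestEntry) gives the same behavior out of the box.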

Tuesday, May 12, 2015

kth in-order traversal node of binary tree

(node, k):
    wrapper = { count = 0 }
    return (node, k, wrapper)

(node, k, wrapper):
    if node == null: 
        return null
    x = (node.left, k, wrapper)
    if x != null:
        return x
    wrapper.count++
    if wrapper.count == k:
        return node
    return (node.right, k, wrapper)
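The same counting traversal as a small Java sketch (the TreeNode type and names are assumed, not from the post):
public class KthInOrder {
    static final class TreeNode {
        int value;
        TreeNode left, right;
        TreeNode(int value, TreeNode left, TreeNode right) {
            this.value = value; this.left = left; this.right = right;
        }
    }

    // Mutable counter shared across the recursion, like the wrapper above.
    private static final class Counter { int count; }

    static TreeNode kthInOrder(TreeNode root, int k) {
        return kthInOrder(root, k, new Counter());
    }

    private static TreeNode kthInOrder(TreeNode node, int k, Counter c) {
        if (node == null) return null;
        TreeNode found = kthInOrder(node.left, k, c);
        if (found != null) return found;
        if (++c.count == k) return node;          // this node is the kth visited in order
        return kthInOrder(node.right, k, c);
    }

    public static void main(String[] args) {
        //     2
        //    / \
        //   1   3
        TreeNode root = new TreeNode(2, new TreeNode(1, null, null), new TreeNode(3, null, null));
        System.out.println(kthInOrder(root, 2).value); // 2
    }
}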

Saturday, May 09, 2015

producer consumer

Note:
- Uses a blocking queue as backing store.
- Cleanly exits the blocked producer and consumer threads when stop() is called. Calling stop() explicitly is needed only when the instance is not wrapped in using, since dispose() stops the blocking queue via q.dispose(). There is no need for finalize() (a destructor) as there is no unmanaged resource to clean up; the class is disposable only so that callers cannot forget to call stop().
- Uses the stopping/stopped pattern to stop (see below).
main():
    using pc = new producer-consumer(threads: 1000, maxsize: 10):
        pc.go()
        sleep(10s)
        pc.stop() // not required inside using: dispose() calls stop()

// creates threads for consume/produce tasks
producer-consumer: idisposable
    private readonly q // used for lock
    nthreads
    
    // ctor
    producer-consumer(threads, maxsize):
        nthreads = threads
        q = new blocking-queue(maxsize)
    
    // work
    go():
        range(0, nthreads)
            .select(x => new thread(produce))
            .foreach(t => t.start())
        range(0, nthreads)
            .select(x => new thread(consume))
            .foreach(t => t.start())
    
    // consume/produce
    consume():
        while !isstopping(): // read stopping with lock(q)
            //i = q.dequeue()
            //print(i)
            // OR 
            i
            if q.trydequeue(out i):
                print(i)
                sleep(random(500))
    produce():
        i = random() // randomly generated item
        while !isstopping(): // read stopping with lock(q)
            //q.enqueue(i)
            //print(i)
            // OR
            if q.tryenqueue(i):
                print(i)
                sleep(random(500))
    
    // stop
    // stopping is true when stop() is called
    // stopped is true when stop() finishes
    stopping = false
    isstopping():
        lock(q):
            return stopping
    stop():
        lock(q):
            if !stopping:
                stopping = true
                q.dispose()
                monitor.pulseall(q)
                stopped = true
    stopped = false
    stopped():
        lock(q):
            return stopped
    
    // dispose - simply stop, no need for finalize/destructor
    dispose():
        stop()

// queue blocks when size is maxsize
blocking-queue: idisposable
    private readonly q // used for lock
    maxsize
    
    // ctor
    blocking-queue(maxsize):
        maxsize = maxsize
        q = new queue()
    
    // en/de/tryde/tryen-queue
    enqueue(item):
        lock(q):
            while q.count == maxsize:
                monitor.wait(q)
            q.enqueue(item)
            if q.count == 1:
                monitor.pulseall(q)
    dequeue():
        lock(q):
            while q.count == 0:
                monitor.wait(q)
            item = q.dequeue()
            if q.count == maxsize-1:
                monitor.pulseall(q)
            return item
    // for graceful exit of blocked consumers
    trydequeue(out item):
        lock(q):
            while q.count == 0:
                // on stop, unblock blocked consumers
                if stopping: // ok to read stopping directly due to lock(q)
                    item = null
                    return false
                monitor.wait(q)
            item = q.dequeue()
            if q.count == maxsize-1:
                monitor.pulseall(q)
            return true
    // for graceful exit of blocked producers
    tryenqueue(item):
        lock(q):
            while q.count == maxsize:
                // on stop, unblock blocked producers
                if stopping: // ok to read stopping directly due to lock(q)
                    return false
                monitor.wait(q)
            q.enqueue(item)
            if q.count == 1:
                monitor.pulseall(q)
            return true
    
    // stop
    // stopping is true when stop() is called
    // stopped is true when stop() finishes
    stopping = false
    stop():
        lock(q):
            if !stopping:
                stopping = true
                monitor.pulseall(q)
                stopped = true
    stopped = false
    stopped():
        lock(q):
            return stopped
    
    // dispose - simply stop, no need for finalize/destructor
    dispose():
        stop()

There is a delay between when stop() is called and when it finishes; the blocked threads exit during this delay. stopping is set to true when stop() is called, and stopped is set to true when stop() finishes. Internally, stopping is used to start the shutdown; stopped exists only so calling code (e.g. for debugging) can tell that stop has finished.
// stopping/stopped pattern
stopping = false
stop():
    lock(lock):
        if !stopping: // cannot use stopped to avoid double clean up
            stopping = true
            // clean up here
            stopped = true
stopped = false
stopped():
    lock(lock):
        return stopped
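A condensed Java sketch of the blocking queue with the stopping/stopped pattern, using synchronized/wait/notifyAll in place of lock/monitor (names are my own; tryDequeue returns null instead of using an out parameter, and java.util.concurrent already ships ready-made blocking queues):
import java.util.ArrayDeque;
import java.util.Deque;

// Bounded queue that blocks when full/empty; tryEnqueue/tryDequeue return
// false/null once stop() has been called so blocked threads can exit cleanly.
public class BlockingQueue<T> {
    private final Deque<T> items = new ArrayDeque<>();
    private final int maxSize;
    private boolean stopping = false;
    private boolean stopped = false;

    public BlockingQueue(int maxSize) { this.maxSize = maxSize; }

    public synchronized boolean tryEnqueue(T item) throws InterruptedException {
        while (items.size() == maxSize) {
            if (stopping) return false;           // unblock producers on stop
            wait();
        }
        items.addLast(item);
        if (items.size() == 1) notifyAll();       // wake consumers waiting on empty
        return true;
    }

    public synchronized T tryDequeue() throws InterruptedException {
        while (items.isEmpty()) {
            if (stopping) return null;            // unblock consumers on stop
            wait();
        }
        T item = items.removeFirst();
        if (items.size() == maxSize - 1) notifyAll();  // wake producers waiting on full
        return item;
    }

    public synchronized void stop() {
        if (!stopping) {
            stopping = true;      // start shutdown
            notifyAll();          // release every blocked thread
            stopped = true;       // shutdown finished
        }
    }

    public synchronized boolean isStopped() { return stopped; }
}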

[Hat tip to MG, JS]

Thursday, May 07, 2015

semaphore

//counting semaphore
semaphore:
    maxsize
    count = 0
    lock = new
    acquire():
        lock(lock):
            while count == maxsize:
                monitor.wait(lock)
            id = count // 0-based slot id
            count++
            if count == 1:
                monitor.pulseall(lock)
            return id
    release():
        lock(lock):
            while count == 0:
                monitor.wait(lock)
            count--
            id = count // 0-based id of the freed slot
            if count == maxsize -1:
                monitor.pulseall(lock)
            return id
// the two statements below are equivalent
lock(lock):
    // do something

monitor.enter(lock)
try:
    // do something
finally:
    monitor.exit(lock)
Note:
- pulse/pulseall when count is above 1 does nothing (no release() can be blocked).
- pulse/pulseall when count is below maxsize-1 does nothing (no acquire() can be blocked).
In both cases there are no threads in the waiting queue to move to the ready queue, which is why the pulses are guarded by count == 1 and count == maxsize-1.
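The same counting semaphore as a Java sketch using synchronized/wait/notifyAll (names are my own; java.util.concurrent.Semaphore is the standard alternative):
// Counting semaphore with maxSize permits, mirroring the pseudocode above.
public class CountingSemaphore {
    private final int maxSize;
    private int count = 0;

    public CountingSemaphore(int maxSize) { this.maxSize = maxSize; }

    public synchronized int acquire() throws InterruptedException {
        while (count == maxSize) {
            wait();                    // all permits taken; wait for a release
        }
        int id = count;                // 0-based slot id
        count++;
        if (count == 1) notifyAll();   // only the 0 -> 1 transition can unblock release()
        return id;
    }

    public synchronized int release() throws InterruptedException {
        while (count == 0) {
            wait();                    // nothing to release; wait for an acquire
        }
        count--;
        int id = count;                // 0-based id of the freed slot
        if (count == maxSize - 1) notifyAll();   // only maxSize -> maxSize-1 can unblock acquire()
        return id;
    }
}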


[Hat tip to MG]

Wednesday, May 06, 2015

circular queue, no throw when full

Used for last n items from a stream.
circular-queue:
    size
    buffer
    start = 0
    end = 0
    count = 0
    ctor(size):
        size = size
        buffer = new [size]
    enqueue(x):
        buffer[end] = x
        //count
        if count < size:
            count++ //count stays at size when full
        //end
        end++
        if end == size:
            end = 0
        //start
        if count == size: //when queue is full, start = end
            start = end
    dequeue():
        if count == 0:
            throw "empty"
        x = buffer[start]
        //count
        count--
        //start
        start++
        if start == size:
            start = 0
        return x
    isempty():
        return count == 0
last-n-items(stream, n):
    if n < 0:
        throw
    if n == 0:
        return [] //empty
    q = new circular-queue(n)
    foreach item in stream:
        q.enqueue(item)
    items = new
    while !q.isempty():
        items.add(q.dequeue())
    return items
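A Java sketch of the circular queue and the last-n-items helper (class and method names are my own):
import java.util.ArrayList;
import java.util.List;

// Fixed-size circular queue that silently overwrites the oldest item when full.
public class CircularQueue<T> {
    private final Object[] buffer;
    private int start = 0, end = 0, count = 0;

    public CircularQueue(int size) { buffer = new Object[size]; }

    public void enqueue(T x) {
        buffer[end] = x;
        if (count < buffer.length) count++;       // count stays at size when full
        end = (end + 1) % buffer.length;
        if (count == buffer.length) start = end;  // full: the oldest item was overwritten
    }

    @SuppressWarnings("unchecked")
    public T dequeue() {
        if (count == 0) throw new IllegalStateException("empty");
        T x = (T) buffer[start];
        count--;
        start = (start + 1) % buffer.length;
        return x;
    }

    public boolean isEmpty() { return count == 0; }

    // Keeps only the last n items seen in the stream.
    public static <T> List<T> lastNItems(Iterable<T> stream, int n) {
        if (n < 0) throw new IllegalArgumentException();
        List<T> items = new ArrayList<>();
        if (n == 0) return items;
        CircularQueue<T> q = new CircularQueue<>(n);
        for (T item : stream) q.enqueue(item);
        while (!q.isEmpty()) items.add(q.dequeue());
        return items;
    }

    public static void main(String[] args) {
        System.out.println(lastNItems(List.of(1, 2, 3, 4, 5), 3)); // [3, 4, 5]
    }
}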

[Hat tip to ctci]

Saturday, May 02, 2015

get path in graph

Works for cyclic graphs too. BFS is better than DFS for large graphs: it finds a shortest path and does not recurse arbitrarily deep.
graph:
    nodes
node:
    id
    neighbors
Dfs.
// dfs
path-dfs(n1, n2):
    path, visited = new 
    path-dfs(n1, n2, path, visited)
    return path

path-dfs(n, n2, path, visited):
    if visited.has(n):
        return false
    path.append(n)
    if n == n2:
        return true
    visited.add(n)
    foreach x in n.neighbors:
        if path-dfs(x, n2, path, visited):
            return true
    path.removelast() //remove(n)
    return false
Bfs. With a visited set and a node->parent map. (Bfs can also be made multi-threaded.)
// bfs, with node->parent map
path-bfs(n1, n2):
    q, visited, parentmap = new
    success = false
    q.enqueue(n1)
    while !q.isempty():
        n = q.dequeue()
        if n == n2:
            success = true
            break
        visited.add(n)
        foreach x in n.neighbors:
            if !visited.has(x):
                parentmap[x] = n //save parent
                q.enqueue(x)
    if !success:
        return null
    return get-path(n2, parentmap)

get-path(n, parentmap):
    path.append(n)
    while parentmap.has(n):
        n = parentmap[n]
        path.append(n)
    path.reverse()
    return path
Bfs with a maxdepth limit, again with a visited set and a node->parent map. Save the depth of each node in the queue, or use 2 queues to track depth (one for odd levels, one for even levels).
path-bfs-maxdepth(n1, n2, maxdepth):
    // maxdepth is > 0
    q, visited, parentmap = new
    success = false
    q.enqueue({ node: n1, depth: 1}) //save depth too
    while !q.isempty():
        item = q.dequeue()
        n = item.node
        depth = item.depth
        if n == n2:
            success = true
            break
        visited.add(n)
        if depth == maxdepth:
            continue
        foreach x in n.neighbors:
            if !visited.has(x):
                parentmap[x] = n //save parent
                q.enqueue({ node: x, depth: depth +1 }) //save depth too
    if !success:
        return null
    return get-path(n2, parentmap)

Tests:
graph: 1<->2->3, n1: 1, n2: 3
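A Java sketch of the plain BFS version with the node->parent map (the Node type and names are my own; here nodes are marked visited when they are enqueued):
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

public class GraphPath {
    static final class Node {
        final int id;
        final List<Node> neighbors = new ArrayList<>();
        Node(int id) { this.id = id; }
    }

    // BFS from n1 to n2; the parent map is unwound to rebuild the path. Handles cycles.
    static List<Node> pathBfs(Node n1, Node n2) {
        Queue<Node> q = new ArrayDeque<>();
        Set<Node> visited = new HashSet<>();
        Map<Node, Node> parent = new HashMap<>();
        q.add(n1);
        visited.add(n1);
        while (!q.isEmpty()) {
            Node n = q.remove();
            if (n == n2) return getPath(n2, parent);
            for (Node x : n.neighbors) {
                if (visited.add(x)) {      // add() is false if already visited
                    parent.put(x, n);      // save parent
                    q.add(x);
                }
            }
        }
        return null;                       // n2 not reachable from n1
    }

    private static List<Node> getPath(Node n, Map<Node, Node> parent) {
        List<Node> path = new ArrayList<>();
        path.add(n);
        while (parent.containsKey(n)) {
            n = parent.get(n);
            path.add(n);
        }
        Collections.reverse(path);
        return path;
    }

    public static void main(String[] args) {
        // graph: 1<->2->3
        Node a = new Node(1), b = new Node(2), c = new Node(3);
        a.neighbors.add(b); b.neighbors.add(a); b.neighbors.add(c);
        pathBfs(a, c).forEach(n -> System.out.print(n.id + " ")); // 1 2 3
    }
}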

[Hat tip to ctci]

Thursday, April 30, 2015

intersection of n collections

Increment the count for each distinct element of a collection only once, since an element can occur more than once within the same collection.

Tests:
{ 1, 1 }, { 1, 2 } => { 1 }
{ 1, 2 }, { 1, 1 } => { 1 }
{ 1 }, { 2 } => {}
{ 1 }, { 1, 2 }, { 1, 1, 2 } => { 1 }
intersection(c1, c2, ... cn):
    collections = { c1, c2, ... cn }
    n = collections.count
    map = new
    foreach collection in collections:
        addtomap(collection, map)
    intersection = new
    foreach x in map.keys:
        count = map[x]
        if count == n: //x occurs in every collection
            intersection.add(x)
    return intersection

addtomap(collection, map):
    // x can occur 1+ times in collection, count it only once
    set = new
    foreach x in collection:
        if !set.has(x):
            set.add(x)
            if !map.has(x):
                map[x] = 1
            else:
                map[x]++
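A Java sketch of the counting approach (names are my own):
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class Intersection {
    // Elements present in every collection; duplicates within one collection count once.
    @SafeVarargs
    static <T> Set<T> intersection(List<T>... collections) {
        Map<T, Integer> counts = new HashMap<>();
        for (List<T> collection : collections) {
            // Count each distinct element of this collection once.
            for (T x : new HashSet<>(collection)) {
                counts.merge(x, 1, Integer::sum);
            }
        }
        Set<T> result = new HashSet<>();
        for (Map.Entry<T, Integer> e : counts.entrySet()) {
            if (e.getValue() == collections.length) result.add(e.getKey());
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(intersection(List.of(1), List.of(1, 2), List.of(1, 1, 2))); // [1]
    }
}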

Friday, April 24, 2015

shuffle

Fisher-Yates shuffle (the version popularized by Knuth). O(n) time.
shuffle(a):
    return shuffle(a.copy(), new rng())

// ok to modify buffer array
shuffle(buffer, rng):
    for i = buffer.length -1, i >= 0, i--:
        // rng.next(i) returns 0 to i-1, rng.next(0) is invalid
        swapindex = rng.next(i +1)
        yield return buffer[swapindex]
        // no need to swap fully as buffer[i] is not used after this iteration
        buffer[swapindex] = buffer[i]
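The same lazy pick-and-half-swap shuffle as a Java sketch that returns a shuffled copy (names are my own):
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

public class Shuffle {
    // Fisher-Yates shuffle on a copy of the input; O(n) time.
    // Mirrors the pseudocode: the picked slot is only half-swapped, since
    // buffer[i] is never read again after iteration i.
    static <T> List<T> shuffle(List<T> items, Random rng) {
        List<T> buffer = new ArrayList<>(items);
        List<T> result = new ArrayList<>(buffer.size());
        for (int i = buffer.size() - 1; i >= 0; i--) {
            int swapIndex = rng.nextInt(i + 1);   // uniform in 0..i
            result.add(buffer.get(swapIndex));
            buffer.set(swapIndex, buffer.get(i));
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(shuffle(Arrays.asList(1, 2, 3, 4, 5), new Random()));
    }
}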

Monday, April 20, 2015

convert spaces to %20

Convert spaces to %20 in a char buffer that has extra padding spaces at the end ('.' marks a space in the examples below).
"this.is.a.test......"
"this%20is%20a%20test"
Test cases:
".test.."
"%20test"

"test..."
"test%20"

"..."
"%20"

"........."
"%20%20%20"

throw exception for:
"...test"
"..test."
3 passes: count spaces, verify actual spaces, convert spaces.

Throw if
- the total number of spaces % 3 is not 0
- the actual spaces aren't all within the actual length.
convert-spaces(s):
    // count spaces
    spaces = 0
    foreach c in s:
        if c == ' ':
            spaces++
    if spaces %3 != 0:
        throw
    spaces = spaces /3
    // calculate actual length
    actual_length = s.length - 2*spaces // 2 extra characters per space
    // verify spaces within actual length
    actual_spaces = 0
    i = actual_length -1
    while i >= 0:
        if s[i] == ' ':
            actual_spaces++
        i--
    if spaces != actual_spaces:
        throw
    // convert spaces
    i = actual_length -1
    target = s.length -1
    while i >= 0:
        if s[i] == ' ':
            target -= 2
            s[target +0] = '%'
            s[target +1] = '2'
            s[target +2] = '0'
        else:
            s[target] = s[i]
        i--
        target--
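A Java sketch of the three-pass conversion on a char[] buffer (names are my own):
public class ConvertSpaces {
    // Replaces each space in the actual text with "%20", using the trailing
    // padding spaces as extra room. Works in place on the char buffer.
    static void convertSpaces(char[] s) {
        // Pass 1: count all spaces (actual + padding); must be a multiple of 3.
        int spaces = 0;
        for (char c : s) {
            if (c == ' ') spaces++;
        }
        if (spaces % 3 != 0) throw new IllegalArgumentException("bad padding");
        spaces /= 3;
        int actualLength = s.length - 2 * spaces;   // 2 extra chars per actual space

        // Pass 2: verify all actual spaces fall within the actual length.
        int actualSpaces = 0;
        for (int i = actualLength - 1; i >= 0; i--) {
            if (s[i] == ' ') actualSpaces++;
        }
        if (spaces != actualSpaces) throw new IllegalArgumentException("bad padding");

        // Pass 3: copy backwards, expanding each space to %20.
        int target = s.length - 1;
        for (int i = actualLength - 1; i >= 0; i--, target--) {
            if (s[i] == ' ') {
                target -= 2;
                s[target] = '%';
                s[target + 1] = '2';
                s[target + 2] = '0';
            } else {
                s[target] = s[i];
            }
        }
    }

    public static void main(String[] args) {
        char[] s = "this is a test      ".toCharArray();
        convertSpaces(s);
        System.out.println(new String(s)); // this%20is%20a%20test
    }
}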