learning's Issues

bfs

//994. Rotting Oranges
func OrangesRotting(grids [][]int) (elapse int) {
	const (
		Empty = 0
		Fresh = 1
		Rotten = 2
	) 

	rowNum := len(grids)
	if rowNum == 0 {return}
	colNum := len(grids[0])

	//build a queue with every rotten orange as a BFS source
	var queue [][2]int
	for i := range grids {
		for j := range grids[i] {
			if grids[i][j] == Rotten {
				queue = append(queue, [2]int{i, j})
			}
		}
	}
	rowOff, colOff := [4]int{-1, 0, 1, 0}, [4]int{0, -1, 0, 1}

	//iterate until the queue is empty; each BFS level is one minute
	for len(queue) > 0 {
		elapse++
		num := len(queue)

		for cnt := 0; cnt < num; cnt++ {
			cur := queue[0]
			queue = queue[1:]

			for i := 0; i < 4; i++ {
				x, y := cur[0]+rowOff[i], cur[1]+colOff[i]
				if x < 0 || x >= rowNum || 
				y < 0 || y >= colNum {continue}

				if grids[x][y] == Fresh {
					grids[x][y] = Rotten
					queue = append(queue, [2]int{x, y})
				}
			}		
		}
	}
	//the final round rots nothing, so one minute was over-counted
	if elapse > 0 {
		elapse--
	}

	//check whether any fresh oranges remain unreachable
	for i := range grids {
		for j := range grids[i] {
			if grids[i][j] == Fresh {
				elapse = -1
				return
			}
		}
	}

	return 
}

func OrangesRottingTest() {
	grids := [][]int{{2,1,1},{1,1,0},{0,1,1}}
	fmt.Println(OrangesRotting(grids))
	grids = [][]int{{2,1,1},{0,1,1},{1,0,1}}
	fmt.Println(OrangesRotting(grids))
	grids = [][]int{{0,2}}
	fmt.Println(OrangesRotting(grids))
}

array

//941. Valid Mountain Array
func ValidMountainArray(nums []int) bool {
	if len(nums) < 3 {
		return false
	}
	var hasUp, hasDown bool
	//track the up and down phases
	for i := 1; i < len(nums); i++ {
		if nums[i] > nums[i-1] {
			//false if an increase comes after a decrease
			if hasDown {return false}
			hasUp = true
		} else if nums[i] < nums[i-1] {
			//false if a decrease comes before any increase
			if !hasUp {return false}
			hasDown = true
		} else {
			//equal neighbors are not allowed
			return false
		}
	}
	return hasUp && hasDown
}

func ValidMountainArrayTest() {
	nums := []int{0,3,2,1}
	fmt.Println(ValidMountainArray(nums))
	nums = []int{3,5,5}
	fmt.Println(ValidMountainArray(nums))
}

//26. Remove Duplicates from Sorted Array
func RemoveDuplicates(nums []int) int {
	//corner case
	if len(nums) < 2 {return len(nums)}

	//index for the deduplicated prefix
	var index int = 1

	//iterate over all numbers
	for i := 1; i < len(nums); i++ {
		if nums[i] != nums[i-1] {
			nums[index] = nums[i]
			index++
		}
	}

	return index
}

func RemoveDuplicatesTest() {
	nums := []int{0,0,1,1,1,2,2,3,3,4}
	size := RemoveDuplicates(nums)
	fmt.Println(nums[:size])
}

search 2

//528. Random Pick with Weight
type RandPick struct {
	sum   []int
}

func NewRandPick(nums []int) (rp *RandPick) {
	rp = &RandPick{}
	rand.Seed(time.Now().Unix())
	rp.sum = make([]int, len(nums))
	copy(rp.sum, nums)
	for i := 1; i < len(rp.sum); i++ {
		rp.sum[i] += rp.sum[i-1]
	}
	return
}

func (rp *RandPick) pickIndex() (idx int) {
	back := func() int{
		if len(rp.sum) == 0 {panic("input nil parameter")}
		return rp.sum[len(rp.sum)-1]
	}
	s := rand.Intn(back()) + 1  //uniform in [1, max_sum]
	//smallest index i such that sum[i] >= s
	return sort.SearchInts(rp.sum, s)
}

trie

//h (trie.h)
#ifndef TRIE_H
#define TRIE_H

#include <stdbool.h>

//assume a lowercase-only key set ('a'..'z'); adjust if KEY_SET_NUM is defined elsewhere
#define KEY_SET_NUM 26

#define INIT_TRIE(t) trie t; init_trie(&t)
#define FREE_TRIE(t) reset_trie(&t)

typedef struct trie_node {
	bool				is_word;
	struct trie_node	*children[KEY_SET_NUM];
} trie_node;

typedef struct trie {
	trie_node			*root;
} trie;

void init_trie(trie *t);
void reset_trie(trie *t);
void insert_trie(trie *t, const char *word);
bool search_trie(trie *t, const char *word);
bool start_with_trie(trie *t, const char *prefix);

void test_tire();

#endif // !TRIE_H

//c
#include "trie.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

extern void err_exit(char* msg);
#define MEM_FAIL "fail to allocate memory"

static trie_node *new_trie_node() {
	trie_node *n = malloc(sizeof(trie_node));
	if (!n)
		err_exit(MEM_FAIL);
	
	n->is_word = false;
	memset(n->children, 0, sizeof(trie_node *)*KEY_SET_NUM);

	return n;
}

static void del_trie_node(trie_node *n) {
	//delete all children first
	for (int i = 0; i < KEY_SET_NUM; i++) {
		if (n->children[i]) {
			del_trie_node(n->children[i]);
			n->children[i] = NULL;
		}	
	}
	free(n);
}

void init_trie(trie *t) {
	t->root = new_trie_node();
}

void reset_trie(trie *t) {
	del_trie_node(t->root);
	t->root = NULL;
}

void insert_trie(trie *t, const char *word) {
	trie_node *cur = t->root;
	if (!cur)
		return;

	int word_len = strlen(word);
	for (int i = 0; i < word_len; i++) {
		int ix = word[i] - 'a';
		if (!cur->children[ix])
			cur->children[ix] = new_trie_node();

		cur = cur->children[ix];
	}
	cur->is_word = true;
}

static trie_node * find(trie *t, const char *prefix) {
	trie_node *cur = t->root;

	int word_len = strlen(prefix);
	for (int i = 0; i < word_len; i++) {
		cur = cur->children[prefix[i] - 'a'];
		if (!cur)
			break;
	}
	return cur;
}

bool search_trie(trie *t, const char *word) {
	trie_node *cur = find(t, word);
	return cur && cur->is_word;
}

bool start_with_trie(trie *t, const char *prefix) {
	return find(t, prefix) != NULL;
}

void test_tire() {
	const char *word = "banan";
	INIT_TRIE(t);
	insert_trie(&t, word);
	printf("has prefix %d", start_with_trie(&t, "ban"));
	FREE_TRIE(t);
}

dfs

//417. Pacific Atlantic Water Flow
func pacificAtlantic(matrix [][]int) (targets [][2]int){
	if len(matrix) == 0 || len(matrix[0]) == 0 {return}
	R, C := len(matrix), len(matrix[0])

	makeMatrix := func() (mat [][]bool) {
		mat = make([][]bool, R)
		for i := range mat {
			mat[i] = make([]bool, C)
		}
		return
	}

	toP, toA := makeMatrix(), makeMatrix()
	//dfs from the borders to mark cells whose water can reach each ocean
	for i := 0; i < R; i++ {
		pacificAtlanticDFS(matrix, i,   0,   0, toP) //left
		pacificAtlanticDFS(matrix, i,   C-1, 0, toA) //right
	}

	for j := 0; j < C; j++ {
		pacificAtlanticDFS(matrix, 0,   j, 0, toP)//top
		pacificAtlanticDFS(matrix, R-1, j, 0, toA)//bottom
	}

	//collect the cells that can reach both oceans
	for i := 0; i < R; i++ {
		for j := 0; j < C; j++ {
			if toP[i][j] && toA[i][j] {
				targets = append(targets, [2]int{i, j})
			}
		}
	}

	return
}

func pacificAtlanticDFS(matrix [][]int, r, c, h int, reachAble [][]bool){
	isValidIndex := func(x, y int) bool {
		return x >= 0 && x < len(matrix) && y >= 0 && y < len(matrix[0])
	}

	if isValidIndex(r, c) && !reachAble[r][c] && matrix[r][c] >= h {
		reachAble[r][c] = true
		pacificAtlanticDFS(matrix, r + 1, c, matrix[r][c], reachAble)
		pacificAtlanticDFS(matrix, r - 1, c, matrix[r][c], reachAble)
		pacificAtlanticDFS(matrix, r, c + 1, matrix[r][c], reachAble)
		pacificAtlanticDFS(matrix, r, c - 1, matrix[r][c], reachAble)
	}
}

list

//25. Reverse Nodes in k-Group *
func reverseKGroup(head *ListNode, k int) *ListNode {
	if head == nil || k == 1 {return head}
	dummy := ListNode{Next:head}
	var size int
	//measure the length of list
	for head != nil {
		head = head.Next
		size++
	}
	pre := &dummy
	for seg := 0; seg + k <= size; seg += k {
		cur := pre.Next
		nxt := cur.Next
		for i := 1; i < k; i++ {
			/*
				pre -> cur -> nxt -> nn
				cur -> nn
				nn  -> cur
				nxt  -> cur(pre) - > nn(nxt)
			*/
			//swap cur and nxt, put nxt before cur
			cur.Next = nxt.Next 
			nxt.Next = pre.Next //this must be 'pre.Next' rather than 'cur'
			pre.Next = nxt //link new head
			nxt = cur.Next //advance to next
		}
		pre = cur
	}

	return dummy.Next
}

simulation

"""  1041. Robot Bounded In Circle """
def isRobotBounded(instructions):
    x = 0
    y = 0
    dir = 0 #direction index: 0=N, 1=W, 2=S, 3=E (counterclockwise order)
    #offset for each direction
    dx = [0, -1, 0, 1]
    dy = [1, 0, -1, 0]
    for c in instructions:
        if c == 'G': #go ahead by dir
            x += dx[dir]
            y += dy[dir]
        elif c == 'L': #turn left (counterclockwise: N -> W -> S -> E)
            dir = (dir + 1)%4
        elif c == 'R': #turn right (clockwise)
            dir = (dir + 3)%4
    return (x == 0 and y == 0) or dir != 0 #if go back to origin or not facing north

combination

//78. Subsets
func subsets(nums []int) (ans [][]int){
	var cur []int
	for i := 0; i <= len(nums); i++ {
		subsetsDFS(nums, 0, i, cur, &ans)
	}
	return
}

func subsetsDFS(nums []int, start, end int, cur []int, ans *[][]int) {
	if start == end {
		//append a copy: cur's backing array is reused across branches
		*ans = append(*ans, append([]int(nil), cur...))
		return
	}

	for i := start; i < end; i++ {
		subsetsDFS(nums, i + 1, end, append(cur, nums[i]), ans)
	}
}

linux buffered IO

/*
block -
    . a block is an abstraction representing the smallest unit of storage on the file sys
    . no I/O operation may execute on an amount of data less than the block size or
        that is not an integer multiple of the block size
    . non block-aligned I/O is inefficient; it has to be rounded up to the next largest block

fields, strings
    . applications operate in terms of higher-level abstractions, such as fields or strings,
        whose size varies; at its worst, a user-space application might read and write a single
        byte at a time

user-buffered I/O
    . lets the application read/write data in a way that feels natural, while the actual I/O occurs
        in units of the file sys block size
    . the kernel buffers data for delaying writes or reading ahead
    . when data is written, it is stored in a buffer inside the program's address space; when the buffer
        reaches a set size (the buffer size), the entire buffer is written out in a single write()
    . likewise, when the application issues read requests, they are not served directly by the file sys,
        but via chunks of the buffer. Data in the buffer is handed out piece by piece. When the buffer is empty,
        another large block-aligned chunk is read in
    . only large, block-aligned reads and writes are issued

c stdio
    . developers can choose stdio, a home-rolled user-buffering solution, or straight sys calls
    . stdio operates on a unique identifier (a file pointer) rather than directly on the fd
    . the fp is represented by a pointer to the FILE typedef
    . an open file is called a stream: input streams or output streams

data alignment
    . processors do not read and write memory in byte-sized chunks, but with a
        specific granularity, such as 2, 4, 8 or 16 bytes. Therefore, processors initiate
        access from addresses that are an integer multiple of the granularity.
    . C variables must be stored at and accessed from aligned addresses; a 32-bit integer is
        aligned on a 4-byte boundary
    . accessing misaligned data carries penalties: some processors can access misaligned data with a
        performance penalty, some cannot at all and the access raises a hardware exception, and
        some processors silently drop the low-order bits to force the address to be aligned, causing
        unintended behavior
    . alignment is naturally handled by the compiler. But saving binary data, dealing with structures,
        and communicating over the network bring alignment issues to the forefront.

types of user buffering
    . Unbuffered: data is submitted directly to the kernel; stderr is unbuffered by default
    . Line-buffered: buffering is performed on a per-line basis; on a newline character, the buffer is
        submitted to the kernel. It makes sense for streams being output to a screen; stdout is
        line-buffered by default
    . Block-buffered: per-block basis, ideal for files; all streams associated with files are
        block-buffered by default

thread-safety of stdio
    . the I/O functions are thread-safe; they associate a lock, a lock count and an owning thread with
        each open stream
    . any given thread must acquire the lock and become the owning thread before issuing any I/O requests
*/
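/*
    a small, hedged illustration of the alignment/padding point above: the layout shown in
    the comments is what a typical compiler produces, but exact sizes and offsets are
    implementation-defined (struct name and fields here are made up for the demo)
*/
#include <stdio.h>
#include <stddef.h>

struct sample {
    char  c;   /* 1 byte, followed by padding so that 'i' is 4-byte aligned */
    int   i;   /* 4 bytes */
    short s;   /* 2 bytes, plus trailing padding rounding the struct size up */
};

int main(void)
{
    printf("sizeof(struct sample) = %zu\n", sizeof(struct sample));
    printf("offsetof(i) = %zu, offsetof(s) = %zu\n",
           offsetof(struct sample, i), offsetof(struct sample, s));
    return 0;
}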
#include <stdio.h>
// open file by path with given behavior mode
/*
    position stream at start of file -
    r (read), r+ (read + write), w (write, truncates the file to zero length if it exists, creates it if not)
    w+ (write + read, includes the behavior of w)
    position stream at end of file -
    a (write in append mode, creates the file if it does not exist), a+ (append, read + write)
    b (on Windows, open the file in binary mode)
*/
FILE * fopen (const char *path, const char *mode);
// convert a fd to a stream; closing the stream also closes the fd
FILE * fdopen (int fd, const char *mode);
// any buffered but not-yet-written data is flushed, then the given stream is closed
int fclose (FILE *stream);
// close all streams associated with current process, including stdin, stdout and stderr
int fcloseall (void);
// read one character, return the 'unsigned char' cast to 'int'
//  errors must also be detected from the return value; the stream position advances by one
//  after the read
int fgetc (FILE *stream);
c = fgetc (stream);
if (c == EOF){
    printf("nok");
}else{
    printf("read %c", (char)c);
}
// push c (cast to 'unsigned char') back onto the stream; on success, c is returned, on failure, EOF is returned
int ungetc (int c, FILE *stream);

// write a char to the stream, return c if success, otherwise EOF
int fputc (int c, FILE *stream);

// read a string from stream, reads up to one less than size bytes from stream,
//  on success, str returned, on failure, NULL is returned
char * fgets (char *str, int size, FILE *stream);

//  read one line of up to LINE_MAX - 1 bytes (LINE_MAX is defined in <limits.h>)
char buf[LINE_MAX];
if (!fgets (buf, LINE_MAX, stream)){
    /* error or EOF */
}

// write all of the null-terminated string to stream, on failure return EOF
int fputs (const char *str, FILE *stream);

// read until EOF or a delimiter d is seen; n is the size of the destination buffer
char *s;
int c = 0;
s = str; //str points to a buffer of at least n bytes
while (--n > 0 && (c = fgetc(stream)) != EOF && (*s++ = c) != '\n')
    ;
if (c == d)
    *--s = '\0';
else
    *s = '\0';

// read / write binary data, such as C structures; reads up to nr
//  elements of data (each size bytes long) from stream into buf,
//  returns the number of elements read (not the number of bytes); the file
//  position is also advanced by the number of bytes read
// binary data differs across machines in variable size, alignment, padding and byte order
size_t fread(void *buf, size_t size, size_t nr, FILE *stream);
char buf[64];
size_t nr;
nr = fread(buf, sizeof(buf), 1, stream);
if (nr == 0)
{
    /* error*/
}

// write up to nr elements, each size bytes in length, to stream;
// advances the file position and returns the number of elements written
size_t fwrite(void *buf,
              size_t size,
              size_t nr,
              FILE *stream);
// manipulate the file pos of stream with offset
// SEEK_SET: file pos set to be offset
// SEEK_CUR: file pos set to be current pos + offset
// SEEK_END: file pos set to be end of file + offset
int fseek (FILE *stream, long offset, int whence);
// set stream position to pos
int fsetpos (FILE *stream, fpos_t *pos);
// set stream position to the start of file
void rewind (FILE *stream);
// return the current stream position, return -1 on error
long ftell (FILE *stream);
// on success, return 0 and place stream postion in pos
int fgetpos (FILE *stream, fpos_t *pos);
// write out the user buffer to the kernel; if stream is NULL, all open output
//  streams are flushed. It does not guarantee that the data is committed to any medium; call
//  fsync() for that
int fflush (FILE *stream);
// tests whether the error indicator is set; returns nonzero if an error has occurred
int ferror (FILE *stream);
// return nonzero value if EOF reached (EOF indicator)
int feof (FILE *stream);
// clears error and EOF indicator
void clearerr (FILE *stream);
// obtain the fd from stream, useful to perform a sys call on a stream
int fileno (FILE *stream);
// sets the buffering type of stream to mode
/*
    mode: _IONBF (unbuffered), _IOLBF (line-buffered), _IOFBF (block-buffered)
    buf of size bytes will be employed by stdio as the buffer for the given stream
    the buffer must remain valid until the stream is closed, so do not supply an
        automatic variable from a scope that ends before the stream is closed
        (e.g. a buffer local to main() that goes away during program termination)
*/
int setvbuf (FILE *stream, char *buf, int mode, size_t size);
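/*
    a minimal sketch tying the stdio calls above together: open a file, switch the stream
    to block buffering with a user-supplied buffer, read it line by line, then close;
    the file name "data.txt" is only an example
*/
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    static char iobuf[BUFSIZ];   /* static: stays valid until the stream is closed */

    FILE *stream = fopen("data.txt", "r");
    if (!stream) {
        perror("fopen");
        return EXIT_FAILURE;
    }

    /* block-buffered I/O through our own buffer; must be set before any other I/O */
    if (setvbuf(stream, iobuf, _IOFBF, sizeof(iobuf)) != 0) {
        perror("setvbuf");
        return EXIT_FAILURE;
    }

    char line[256];
    while (fgets(line, sizeof(line), stream))
        fputs(line, stdout);

    if (ferror(stream))
        perror("fgets");

    if (fclose(stream) == EOF)
        perror("fclose");
    return 0;
}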

linux concept

swap - Swap is a filesystem type – a raw disk partition is formatted as swap
upon system installation. It's treated as second-level RAM by the
OS. When the OS runs out of RAM, it uses swap.

c sort

void bucket_sort_fvec(fvector *v, float_cmp_ptr cmp);

void radix_sort_vec(vector *v);

static void fswap(float *lhs, float *rhs) {
	float tmp = *lhs;
	*lhs = *rhs;
	*rhs = tmp;
}

void bucket_sort_fvec(fvector *v, float_cmp_ptr cmp) {
	//create buckets for count sorting
	const int BUCKET_NUM = 10;
	fvector buckets[BUCKET_NUM];
	for (int i = 0; i < BUCKET_NUM; i++) {
		init_fvec(&buckets[i]);
	}

	//put all elements into buckets
	int ix = 0;
	for (int i = 0; i < v->len; i++) {
		ix = v->items[i] * BUCKET_NUM;
		if (ix < 0 || ix >= BUCKET_NUM)
			err_exit(BAD_INDEX);
		push_back_fvec(&buckets[ix], v->items[i]);
	}

	//sort each bucket
	for (int i = 0; i < BUCKET_NUM; i++) {
		select_sort_fvec(&buckets[i], cmp);
	}

	//merge all buckets back into v and release them
	ix = 0;
	for (int i = 0; i < BUCKET_NUM; i++) {
		for (int j = 0; j < buckets[i].len; j++) {
			v->items[ix++] = buckets[i].items[j];
		}
		reset_fvec(&buckets[i]);
	}
}

int get_max_ele(vector *v) {
	int mx = INT_MIN;
	for (int i = 0; i < v->total; i++) {
		if (mx < (int)v->items[i])
			mx = (int)v->items[i];
	}
	return mx;
}

#define GET_DIGIT(a, n) (((a)/(n))%10)

//sort the array according to the digit represented by exp
void count_sort(vector *v, int exp) {
	int bucket_num = 10;
	int *output = calloc(v->total, sizeof(int)),  //output array
		*count = calloc(bucket_num, sizeof(int));

	//store the count of occurrences of each digit
	for (int i = 0; i < v->total; i++)
		count[GET_DIGIT(VECTOR_GET(*v, int, i), exp)]++;

	//make count[i] contain the actual position of this digit
	// in output
	for (int i = 1; i < 10; i++)
		count[i] += count[i - 1];

	//build the output array (iterate backwards to keep the sort stable)
	for (int i = v->total - 1; i >= 0; i--) {
		int digit = GET_DIGIT(VECTOR_GET(*v, int, i), exp);
		output[count[digit] - 1] = (int)v->items[i];
		count[digit]--;
	}

	//copy the output array back to the source vector
	for (int i = 0; i < v->total; i++)
		v->items[i] = (void *)(long)output[i];

	free(output);
	free(count);
}

void radix_sort_vec(vector *v) {
	int mx = get_max_ele(v);

	//start sorting from less significant digit
	for (int exp = 1; mx / exp > 0; exp *= 10)
		count_sort(v, exp);
}

void test_sort() {
	int nums[] = { 4, 2, 3, 1 , 7, 5, 6};
	VECTOR_INIT(arr);
	VECTOR_SET_DATA(arr, nums, ITEM_OF(nums));
	VECTOR_PRINT(arr);
	//shell_sort_vec(&arr, less_cmp);
	//heap_sort_vec(&arr, true);
	radix_sort_vec(&arr);
	VECTOR_PRINT(arr);

	VECTOR_RESET(arr);

	float fnums[] = {0.45, 0.23, 0.44, 0.26, 0.78, 0.12};
	fvector farr;
	init_fvec(&farr);
	set_data_fvec(&farr, fnums, ITEM_OF(fnums));
	print_fvec(&farr);
	bucket_sort_fvec(&farr, less_cmp_f);
	print_fvec(&farr);
	reset_fvec(&farr);
}

golang rb tree

package bst

import (
	. "../kit"
)

/*without considering on deletion*/

const (
	RED   = 0
	BLACK = 1
)

type RBNode struct {
	key					 string
	val					 interface{}
	color				 int //on init, node color will be red
	left, right, parent  *RBNode
}

type RBTree struct {
	root				*RBNode
}

/*insert rb node to tree*/
func (root *RBNode) insertRBNode(pt *RBNode) *RBNode {
	if root == nil {return pt}

	if pt.key < root.key {
		root.left = root.left.insertRBNode(pt)
		root.left.parent = root
	} else if pt.key > root.key {
		root.right = root.right.insertRBNode(pt)
		root.right.parent = root
	}

	return root
}
/*	  
	   ptP					  ptP
	  /						  /
	 pt						ptL
    /						   \  
  ptL			=>				pt
    \						   /
	ptLR					ptLR
*/
/*balance rotation; the rotated node and the tree root may both change*/
func (root *RBNode) rotateRight(pt *RBNode) (*RBNode, *RBNode) {
	if pt == nil {return root, pt}
	ptLeft := pt.left //save ptr to ptL

	//link ptLR as pt.left
	pt.left = ptLeft.right
	if pt.left != nil {
		//update the parent of new pt.left
		pt.left.parent = pt
	}

	//link ptL with parent of pt
	ptLeft.parent = pt.parent
	if pt.parent == nil {
		root = ptLeft
	//replace the position of pt
	} else if pt == pt.parent.left {
		pt.parent.left = ptLeft
	} else {
		pt.parent.right = ptLeft
	}

	//link pt as right child of original ptL
	ptLeft.right = pt
	pt.parent = ptLeft
	return root, pt
}

/*
	    ptP						ptP
	    /						/	
	  pt					   ptR
        \			=>		   /
        ptR					  pt
		/						\
	   ptRL						ptRL
*/
func (root *RBNode) rotateLeft(pt *RBNode) (*RBNode, *RBNode) {
	ptRight := pt.right //preserve ptr to ptR

	pt.right = ptRight.left //link ptRL with pt
	if pt.right != nil {
		pt.right.parent = pt
	}

	ptRight.parent = pt.parent  //link ptR with ptP
	if pt.parent == nil {
		root = ptRight
	} else if pt == pt.parent.left { //put in the position of pt
		pt.parent.left = ptRight
	} else {
		pt.parent.right = ptRight
	}
  
	ptRight.left = pt //link pt as left child of ptR
	pt.parent = ptRight
	return root, pt
}

/*fix violation upon pt node to root*/
func (root *RBNode) fixViolation(pt *RBNode) (*RBNode, *RBNode){
	var ptP, ptGP *RBNode //parent and grandpa of pt
	for pt != root && pt.color != BLACK && pt.parent.color == RED {

		ptP = pt.parent
		ptGP = pt.parent.parent


		if ptP == ptGP.left {
		/*
		 Case A: parent of the new node is the left child of its grandpa
		 G(grandpa), P(parent), U(uncle), N(new node)
			G
		   / \
		  P   U
		 /
		N
		*/
			ptUC := ptGP.right
			if ptUC != nil && ptUC.color == RED {
			/*
			 Case A-1: uncle of new node is in red color, in this case, only
			 need to do recoloring
				G(B)
			   /    \
			  P(R)   U (R)
			 /
			N
			*/
				ptGP.color = RED
				ptP.color = BLACK
				ptUC.color = BLACK
				pt = ptGP //next loop check and fix violation for grandpa
			} else {
				if pt == ptP.right {
				/*
				 Case A-2: the new node is the right child of its parent; in this case
				  a left-rotation is needed
					G(R)
				   /    \
				  P(B)   U (B)
					\
					 N
				*/
					root, ptP = root.rotateLeft(ptP)
					pt = ptP
					ptP = pt.parent
				}
				/*
				 Case A-3: the new node is the left child of its parent; in this case
				  a right-rotation is needed
					G(R)
				   /    \
				  P(B)   U (B)
				 /
				N
				*/
				root, ptGP = root.rotateRight(ptGP)
				ptP.color, ptGP.color = ptGP.color, ptP.color
				pt = ptP				
			}
		} else {
		/*
		 Case B: parent of new node is the right child of its grandpa
			G
		   / \
		  U   P
			 /
			N
		*/		
			ptUC := ptGP.left
			if ptUC != nil && ptUC.color == RED {
			/*
			 Case B-1: uncle of new node is in color of red, in this case, only need
			 to recoloring
				G(B)
			   /   \
			  U(R)  P(R)
				   /
				  N 
			*/
				ptGP.color = RED
				ptP.color  = BLACK
				ptUC.color = BLACK
				pt = ptGP			
			} else {
				
				if pt == ptP.left {
				/*
				 Case B-2: new node is the left child of parent, in this case, right rotation
				 is needed
					G(R)
				   /   \
				  U(B)  P(B)
					   /
					  N 
				*/		
					root, ptP = root.rotateRight(ptP)
					pt = ptP
					ptP = pt.parent		
				}

				/*
				 Case B-3: new node is the right child of parent, in this case, left rotation
				 is needed
					G(R)
				   /   \
				  U(B)  P(B)
						  \
						   N 
				*/
				root, ptGP = root.rotateLeft(ptGP)
				ptP.color, ptGP.color = ptGP.color, ptP.color
				pt = ptP
			}
		}
	}

	root.color = BLACK
	return root, pt
}

func (t *RBTree) InsertRBNode(k string, v interface{}){
	nnode := &RBNode{key:k,val:v} //new rb node is marked as red (RED is the zero value)
	t.root = t.root.insertRBNode(nnode)
	t.root, nnode = t.root.fixViolation(nnode)
}

/*iterative in-order traversal*/
func (t *RBTree) InorderRB() (out []Item) {
	if t.root == nil {return}
	
	var stack []*RBNode
	curr := t.root

	for true {
		for curr != nil {
			stack = append(stack, curr)
			curr = curr.left
		}

		if len(stack) > 0 {
			curr = stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			out = append(out, Item{Key:curr.key,Val:curr.val})
		}

		curr = curr.right
		if curr == nil && len(stack) == 0 {break}
	}
	return
}

func NewRBTree() (t *RBTree) {
	t = &RBTree{}
	return
}

func (root *RBNode) getHeight() int {
	if root == nil {return 0}
	return Max(root.left.getHeight(), root.right.getHeight()) + 1
}

func (t *RBTree) IsBalanced() bool {
	if t.root == nil {return true}
	return Abs(t.root.left.getHeight() - t.root.right.getHeight()) <= 1
}

func (t *RBTree) GetHeight() int {
	return t.root.getHeight()
}

/*iterative level-order traversal with BFS*/
func (t *RBTree) Levelorder() (out [][]interface{}) {
	if t.root == nil {return}

	queue := []*RBNode{t.root}
	level := 0
	for len(queue) > 0 {	
		size := len(queue)
		if size > 0	{out = append(out, []interface{}{})} //allocate array to store next level
		for ; size > 0; size-- {
			curr := queue[0]
			queue = queue[1:]
			out[level] = append(out[level], curr.val) //travers node of current level

			//put next level nodes into queue
			if curr.left != nil {queue = append(queue, curr.left)}
			if curr.right != nil {queue = append(queue, curr.right)}
		}
		level++ //increase level
	}

	return
}

golang tree

package kit

type TreeNode struct {
	Val		 Val_t
	Left	 *TreeNode
	Right	 *TreeNode
}

type BSTree struct {
	Root	 *TreeNode
	Size	  int
}

const NULL Val_t = -1 << 63 //sentinel for a nil child in serialization

func NewBSTree() (t *BSTree) {
	t = &BSTree{}
	return t
}

func (t *BSTree) Serialize() (out []Val_t) {
	Q := []*TreeNode{t.Root}

	for len(Q) > 0 {
		node := Q[0]
		Q = Q[1:]
		
		if node != nil {
			out = append(out, node.Val)
			Q = append(Q, node.Left)
			Q = append(Q, node.Right)		
		} else {
			out = append(out, NULL)
		}
	}
	return
}

func (t *BSTree) DeSerializeRecur(nums []Val_t) {

}

func BSTTest() {
	
}

prefix sum

//303. Range Sum Query – Immutable
type NumArray struct {
	sums  []int
}

func NewNumArray(nums []int) (na *NumArray) {
	na = &NumArray{}
	na.sums = make([]int, len(nums) + 1)
	for i := 1; i <= len(nums); i++  {
		na.sums[i] = na.sums[i - 1] + nums[i - 1]
	}
	return
}

func (na *NumArray) SumRange(i, j int) int {
	if j+1 >= len(na.sums) || i < 0 || i > j {
		panic("index out of range")
	}
	return na.sums[j + 1] - na.sums[i]
}

golang avl tree

/*
 Author: Zhu GengYu
*/

package bst

import (
	"sync"
	"errors"
	. "../kit"
)

/*definition of tree node*/
type AVLNode struct {
	key				string
	val				interface{}
	left, right		*AVLNode
	height			int
}

/*tree root node with read-write lock*/
type AVLTree struct {
	lock			sync.RWMutex
	root			*AVLNode
}

const (
	NODE_NOT_FOUND = "key not found"
)

/*find node with key k in tree under root n*/
func (n *AVLNode) value(k string) (interface{}, error) {
	for n != nil {
		if k < n.key {
			n = n.left
		} else if k > n.key {
			n = n.right
		} else {
			return n.val, nil
		}
	}

	return nil, errors.New(NODE_NOT_FOUND)
}

/*get height of node*/
func (root *AVLNode) getHeight() int {
	if root == nil {return 0}
	return root.height
}

/*get balance factor of node*/
func (root *AVLNode) getBalance() int {
	if root == nil {return 0}
	return root.left.getHeight() - root.right.getHeight()
}

/*create new tree node*/
func newAVLNode(k string, v interface{})(n *AVLNode){
	n = &AVLNode{key:k, val:v, height:1}
	return
}

/*
     y            x
    / \    (R)   / \
   x   T3  =>   T1  y
  / \      <=      / \
 T1	 T2	   (L) 	 (T2) T3
    do a right/left rotation and change the subtree root node;
	for a right rotation, T2 is moved from x.right to y.left, and the subtree root changes from y to x
	for a left rotation, T2 is moved from y.left to x.right, and the subtree root
	  changes from x to y
*/
func rightRotate(y *AVLNode) *AVLNode {
	if y == nil || y.left == nil {return y}
	x := y.left
	T2 := x.right

	x.right = y
	y.left = T2

	y.height = Max(y.left.getHeight(), y.right.getHeight()) + 1
	x.height = Max(x.left.getHeight(), x.right.getHeight()) + 1

	return x
}
func leftRotate(x *AVLNode) *AVLNode {
	if x == nil || x.right == nil {return x}
	y := x.right
	T2 := y.left

	y.left = x
	x.right = T2

	x.height = Max(x.left.getHeight(), x.right.getHeight()) + 1
	y.height = Max(y.left.getHeight(), y.right.getHeight()) + 1

	return y
}

/*rebalance the root node*/
func (root *AVLNode) reBalance(key string, balance int) *AVLNode{
	if root.left != nil && balance > 1 {
		if key < root.left.key {//left left case
/*
 root 'z' is the first unbalanced parent after insert
		*z					  (y)
	   /  \					/    \
      *y   T4			   x      (z)
     / \				  / \     / \
	x  *T3        =>    T1   T2 (T3) T4
   / \
  T1 T2
  during right rotation, T2 will change its parent node
*/
			return rightRotate(root)
		} else if key > root.left.key {//left right case
/*
	   z					 *z				   (x)
	   / \					/  \			   / \
     *y   T3			 *(x)   T3			  y	  (z)
     / \				  /					 / \   \
	T1 *x        =>		(y)			=>		T1 T2	T3
	   /				/ \
      *T2               T1 (T2)
	first, left rotate the left child (y) of the root (z)
	then, right rotate the root (z)
*/
			root.left = leftRotate(root.left)
			return rightRotate(root)
		}
	} else if root.right != nil && balance < -1 {
		if key > root.right.key {//right right case
/*
    *z				 (y)
   / \				/    \
  T1  *y		  (z)     x	
     / \    =>	  / \    / \
    *T2  x		 T1(T2) T3 T4
	   / \
      T3 T4
*/
			return leftRotate(root)
		} else if key < root.right.key {//right left case
/*
	z					*z						(x)
   / \				   / \					   /   \	
  T1 *y				  T1  *(x)				 (z)    (y)
     / \     =>			 /  \		=>		 / \   / \
    *x   T4			   *T2  (y)				T1 T2 T3 T4
   / \					   /   \
  T2  *T3			      (T3)  T4
*/
			root.right = rightRotate(root.right)
			return leftRotate(root)
		}	
	}

	return root
}

/*insert key-value pair into tree*/
func (root *AVLNode) insertKv(k string, v interface{}) *AVLNode {
	if root == nil {return newAVLNode(k, v)}

	if k < root.key {
		root.left = root.left.insertKv(k, v)
	} else if k > root.key {
		root.right = root.right.insertKv(k, v)
	}

	//update height of root node
	root.height = Max(root.left.getHeight(), root.right.getHeight()) + 1
	//balance the root and return new root node
	balance := root.getBalance()
	return root.reBalance(k, balance)
}

/*iterative in-order traversal*/
func (root *AVLNode) inorder() (out []Item) {
	if root == nil {return}
	
	var stack []*AVLNode
	curr := root

	for true {
		for curr != nil {
			stack = append(stack, curr)
			curr = curr.left
		}

		if len(stack) > 0 {
			curr = stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			out = append(out, Item{curr.key, curr.val})
		}

		curr = curr.right
		if curr == nil && len(stack) == 0 {break}
	}
	return
}

/*iterative pre-order traversal*/
func (root *AVLNode) preorder() (out []Item) {
	if root == nil {return}

	queue := []*AVLNode{root}
	for len(queue) > 0 {
		curr := queue[0]
		queue = queue[1:]

		out = append(out, Item{curr.key, curr.val})

		if curr.left != nil {queue = append(queue, curr.left)}
		if curr.right != nil {queue = append(queue, curr.right)}
	}

	return
}

/*iterative post-order traversal with two stacks*/
func (root *AVLNode) postorder() (out []Item) {
	if root == nil {return}

	var stack1, stack2 []*AVLNode
	stack1 = append(stack1, root)
	for len(stack1) > 0 {
		curr := stack1[len(stack1)-1]
		stack1 = stack1[:len(stack1)-1]
		stack2 = append(stack2, curr)

		if curr.left != nil {stack1 = append(stack1, curr.left)}
		if curr.right != nil {stack1 = append(stack1, curr.right)}
	}

	for len(stack2) > 0 {
		curr := stack2[len(stack2)-1]
		stack2 = stack2[:len(stack2)-1]
		out = append(out, Item{curr.key, curr.val})
	}

	return
}

/*iterative level-order traversal with BFS*/
func (root *AVLNode) levelorder() (out [][]Item) {
	if root == nil {return}

	queue := []*AVLNode{root}
	level := 0
	for len(queue) > 0 {	
		size := len(queue)
		if size > 0	{out = append(out, []Item{})} //allocate array to store next level
		for ; size > 0; size-- {
			curr := queue[0]
			queue = queue[1:]
			//travers node of current level
			out[level] = append(out[level], Item{curr.key, curr.val})

			//put next level nodes into queue
			if curr.left != nil {queue = append(queue, curr.left)}
			if curr.right != nil {queue = append(queue, curr.right)}
		}
		level++ //increase level
	}

	return
}

/*check links to node*/
func (root *AVLNode) hasLeft() bool {
	return root.left != nil
}
func (root *AVLNode) hasRight() bool {
	return root.right != nil
}
func (root *AVLNode) isLeaf() bool {
	return !root.hasLeft() && !root.hasRight()
}

/*get the left-most node, which holds the min key*/
func (root *AVLNode) min() *AVLNode {
	for ; root.hasLeft(); root = root.left {}
	return root
}

/*delete a node from the AVL tree by key (note: no rebalancing is performed after deletion)*/
func (root *AVLNode) delete(k string) (*AVLNode, error) {
	var err error
	if root == nil {return nil, errors.New(NODE_NOT_FOUND)}
	//recursively find the parent node of to-delete node
	if k < root.key {
		root.left, err = root.left.delete(k)
		return root, err
	} else if k > root.key {
		root.right, err = root.right.delete(k)
		return root, err
	}

	if root.isLeaf() {
		return nil, nil //if the to-delete node is leaf, return it as nil to remove link
	} else if root.hasLeft() && !root.hasRight() {
		return root.left, nil 
	} else if !root.hasLeft() && root.hasRight() {
		return root.right, nil
	} //if the to-delete node has only one child, replace the position of root
	//with it child to remove the link

	//exchange value of to-delete node with the left most node of its right
	// node, which is the successor of to-delete node
	min := root.right.min()
	root.key, root.val = min.key, min.val
	root.right, err = root.right.delete(min.key)
	return root, err
}

/*inorder iterator*/
//refer to https://danrl.com/blog/2018/basic-data-structures-binary-tree/
type Item struct {
	Key			string
	Val			interface{}
}
/*inorder traversal through channel*/
func (root *AVLNode) iter (ch chan<-Item) {
	if root == nil {
		return
	}
	root.left.iter(ch)
	ch <- Item{Key:root.key,Val:root.val}
	root.right.iter(ch)
}

//public functions guarantee concurrency safety
func NewAVLTree()(t *AVLTree){
	t = &AVLTree{}
	return t
} 

/*search value by key in tree*/
func (t *AVLTree) Value(k string) (interface{}, error){
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.root.value(k)
}

/*insert key-value pair into the tree, keeping the tree balanced*/
func (t *AVLTree) BalancedInsert(key string, val interface{}) {
	t.lock.Lock()
	defer t.lock.Unlock()
	t.root = t.root.insertKv(key, val)
}

/*get the height of tree*/
func (t *AVLTree) GetHeight() int {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.root.getHeight()
}

/*check if tree is balanced*/
func (t *AVLTree) IsBalanced() bool {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return Abs(t.root.getBalance()) <= 1 
}

/*iterative in-order traversal*/
func (t *AVLTree) Inorder() (out []Item) {
	t.lock.RLock()
	defer t.lock.RUnlock()	
	return t.root.inorder()
}

/*iterative pre-order traversal*/
func (t *AVLTree) Preorder() (out []Item) {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.root.preorder()	
}

/*iterative post-order traversal*/
func (t *AVLTree) Postorder() (out []Item) {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.root.postorder()	
}

/*iterative level-order traversal with BFS*/
func (t *AVLTree) LevelOrder() (out [][]Item) {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.root.levelorder()
}

/*inorder traversal through channel*/
func (t *AVLTree) Iter() <-chan Item {
	ch := make(chan Item)
	t.lock.RLock()
	//delegate all tasks to a goroutine, 
	//  keep program from blocking
	go func() {
		t.root.iter(ch)
		t.lock.RUnlock() //unlock the mutex once the traversal is done
		close(ch)
	}()
	return ch
}

/*deletion*/
func (t *AVLTree) Delete(k string) error {
	var err error
	t.lock.Lock()
	defer t.lock.Unlock()
	t.root, err = t.root.delete(k)
	return err
}

linux IO basics


/*
file descriptor
    -fd 0 is standard input (stdin), 1 is standard output (stdout), and 2 is standard error (stderr)
    -STDIN_FILENO, STDOUT_FILENO, and STDERR_FILENO in the C std lib
    -by default, the keyboard and the terminal's display
    -an fd may refer to regular files, device files, pipes
    -a process's file table contains its open files and their access mode, current file position,
        and other metadata; a child process receives a copy of its parent's file table, so
        file operations in one process do not affect the other
redirect
    -redirect the std fd, pipe the output of one program into another

owners of the newly created files
    -owner user is the uid of process creating the file
    -owner group could be the gid of process creating file, or maybe the gid of the parent directory

permissions of the new files
    -through mode bitset
    -by default '0644': the owner can read and write, everyone else can only read
    -'0700': readable, writable and executable by the owner only
    -the final permissions are also modified by the umask, a process-specific attribute usually set via the login shell

behavior of writes
    -when a call to write() returns, the kernel has copied the data from the supplied buffer
        to a kernel buffer; however, there is no guarantee that the data has been written
        to its destination (system calls return much faster than that happens, due to
        the disparity in performance between the processor and the hard disk)
    -the kernel gathers up all the dirty buffers (buffers that contain data newer than what
        is on disk), sorts them optimally and writes them to the disk (so-called writeback);
        the kernel thus defers writes to more idle periods and batches many writes
        together (delayed writes)
    -when a read is issued for a piece of just-written data that only lives in a buffer (in-memory cache)
        rather than on disk, that request is satisfied directly from the buffer and does not cause a read
        from disk. This gives the expected result, as long as the system does not crash
        before the data makes it to disk.
    -write ordering may need to be taken care of by the application; the kernel reorders write requests as
        it sees fit (for performance reasons), although every request eventually hits the disk. Ordering is a
        concern for databases: they need to ensure the write order, so database file writes
        need to be synchronized
    -I/O errors: errors that occur during writeback have no chance to be reported back to the issuing
        process. In fact, dirty buffers inside the kernel are not associated with processes; multiple
        processes may dirty a single buffer.
    -the kernel attempts to write out data in a timely manner, bounded by a 'maximum buffer age', specified
        in centiseconds (0.01 s units)

journey of read()
    -the C lib provides definitions of sys calls that are converted to trap statements at
        compile time
    -the user-space process traps into the kernel, passes through the sys call handler and is
        handed to the read() sys call
    -the kernel figures out what object backs the given fd, then invokes the read function associated with
        the backing object
    -that read function is part of the file sys code; it does the physical reading from the file sys, returns
        the data to the in-kernel read() call, which returns to the sys call handler, which copies the data back
        to user space
    -the sys call returns and the process continues to execute


append writes
    -multiple processes may race for the end of the same log file without synchronization
    -append mode avoids this race condition and ensures the file pos is always set to the
        end of the file for every write; the file pos is automatically updated before each new
        write() is issued

synchronizing I/O
    -buffering writes provides a significant performance improvement, but an application may need
        to control when data hits the disk; in that case, performance has to be traded for synchronized
        operations
    
direct I/O
    -bypass the complex layers of caching, buffering and I/O management between devices and applications
        and perform the app's own I/O management
    -e.g. database systems often prefer to perform their own caching and want to minimize
        the presence of the OS
    -the O_DIRECT flag to open() instructs the kernel to minimize I/O management; I/O initiates
        directly from the user-space buffer to the device, and all I/O is synchronous
    -the request length, buffer alignment and file offset must be integer multiples of the underlying
        device's sector size

multiplexed I/O
    -applications often need to block on more than one fd, e.g. keyboard (stdin), ipc, files;
        a GUI application may contend with hundreds of pending events in its mainloop
    -a thread could service each fd separately, but a single process cannot block on more than one
        fd at the same time: if a read() system call is issued and data is not yet available, the process
        blocks and is no longer able to service the other fds
    -with nonblocking I/O, the application can issue I/O requests that return a special error code instead
        of blocking, but this is inefficient: first, the process needs to continually issue I/O,
        waiting for one of its fds to be ready (poor design); second, the application cannot sleep and
        free the processor for other tasks
    -multiplexed I/O allows an application to concurrently block on multiple fds, and receive notification
        when any one of them becomes ready, without blocking on any single fd
    -the work procedure of multiplexed I/O looks like:
        1. issue the multiplexed I/O request: tell me when any of these fds is ready for I/O
        2. sleep until one or more fds are ready
        3. wake up the process
        4. handle all fds that are ready
        5. go back to sleep
    
I/O kernel implementation
    -VFS: a mechanism of abstraction that allows the kernel to call file sys functions and
        manipulate file sys data without caring about the specific file sys type, the so-called
        'common file model'
        . via function pointers and object-oriented practices, the 'common model' provides
            a framework to which file systems in the kernel must adhere
        . the framework provides hooks to support reading, creating links, synchronizing and so on
        . the framework enables sys calls to read/write from/to any medium: all file systems support
            the same concepts, the same interfaces and the same sys calls
    -Page cache
        . an in-memory store of recently accessed data from the on-disk file sys, which speeds up access
            and avoids repeated disk I/O
        . exploits the concept of 'temporal locality': resources accessed at one point are very likely
            to be accessed again in the near future
        . the page cache is the first place the kernel looks for file sys data. Data is transferred from
            disk to cache on the first visit; later accesses simply return from the cache. Cache operations
            are transparent, and the cache's data is always kept valid.
        . the page cache is dynamic in size. I/O brings in more and more data, and the page cache grows,
            consuming any free memory. If the page cache eventually consumes all free memory, it
            is 'pruned', releasing the least-used pages to make room.
        . the kernel may instead swap seldom-used pages of process memory to disk, trading process
            memory for a larger page cache footprint
        . the 'prune' vs 'swap' preference can be adjusted via /proc/sys/vm/swappiness
        . another locality of reference is 'sequential locality', which says data is often referenced
            sequentially. The kernel provides 'page cache readahead' to take advantage of this, reading extra
            data off the disk and into the page cache on a read request.
        . the kernel handles readahead dynamically: if it notices that a process is consistently using
            the data read ahead, it may enlarge the readahead window from 16 KB up to 128 KB
    -Page writeback
        . the kernel defers writes via buffers. Eventually, dirty buffers are committed to disk, synchronizing
            the on-disk file with the data in memory
        . dirty buffers are written out when free memory shrinks below a configurable threshold, or when
            a dirty buffer ages beyond a configurable threshold
        . the kernel carries out writeback via 'flusher' threads, which wake up and begin committing
            dirty buffers to disk when either condition above is true
    -buffers in the kernel
        . represented by the buffer_head structure, which tracks various metadata associated with the buffer
        . the buffer data itself resides in the page cache
*/

/*Basic File Function API (sys call)*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
//maps the file (pathname) to a fd; the file pos is set to the start of the file
//  and the access mode is specified via flags
/*
flags can be combined in bitwise-or manner
    O_RDONLY, O_WRONLY, or O_RDWR
    O_APPEND: the file pos is updated to the end of the file before each write (always synchronized to
        the real end position, even after writes from another process)
    O_ASYNC: a signal (e.g. SIGIO) generated when file becomes readable/writeable, only for
        FIFOs, pipes, sockets and terminals
    O_CLOEXEC: automatically close the file upon executing a new process, which eliminates a
        race condition
    O_CREAT: create the file if not exist, otherwise no effect
    O_DIRECTORY: target is a directory, to support opendir()
    O_EXCL: cause O_CREAT to fail if file already exists, which is used to prevent race conditions
        on file creation
    O_LARGEFILE: the target file may be larger than 2GB; open it using a 64-bit offset for the file pos
    O_NONBLOCK: open file in nonblocking mode, no operations will cause process to block on I/O, it's
        defined for FIFOs
    O_SYNC: open the file for synchronous I/O; a write operation will not complete until the data has been
        physically written to disk; no effect on reads since they are already synchronous
    O_TRUNC: for regular file, file will be truncated to zero length once open, do not use
        O_TRUNC | O_RDONLY, it's undefined
*/
int open (const char *name, int flags);
/*
    owner - S_IRWXU (RWX)  S_IRUSR (R-only) S_IWUSR(W-only)  S_IXUSR(execute only)
    group - S_IRWXG        S_IRGRP          S_IWGRP          S_IXGRP
    others - S_IRWXO       S_IROTH          S_IWOTH          S_IXOTH
*/
int open (const char *name, int flags, mode_t mode);
//shortcut for open with O_WRONLY | O_CREAT | O_TRUNC
int creat (const char *name, mode_t mode);
//return fd on success, on error, return -1 and set errno
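/*
    a minimal sketch of the append-mode logging pattern described above;
    the file name "app.log" is only an example
*/
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    /* O_APPEND: the kernel resets the file pos to the current end of file before
       every write, so concurrent writers do not overwrite each other's records */
    int fd = open("app.log", O_WRONLY | O_CREAT | O_APPEND, 0644);
    if (fd == -1) {
        perror("open");
        return 1;
    }

    const char *msg = "one log line\n";
    if (write(fd, msg, strlen(msg)) == -1)
        perror("write");

    if (close(fd) == -1)
        perror("close");
    return 0;
}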
#include <unistd.h>
//reads up to len bytes from the current file pos into the memory pointed to by buf,
//  returns the number of bytes read; the file pos is also advanced
//  - the returned size may be less than len: fewer than len bytes may be available, or the
//  sys call may have been interrupted by a signal, or the pipe may be broken
//  - the returned size may be 0 to indicate EOF (end of file); a blocking read
//  instead waits for more data (socket or device file)
ssize_t read (int fd, void *buf, size_t len);

//read all bytes with errors handled (partial reads)
/*
   EBADF, EINVAL(bad fd), EFAULT(bad read buf),  EIO
*/
ssize_t ret; //a single read is limited to SSIZE_MAX bytes (2^31 - 1 on 32-bit systems)
if (len > SSIZE_MAX)
    len = SSIZE_MAX;

while (len != 0 && (ret = read(fd, buf, len)) != 0) //read until EOF reached
{
    if (ret == -1)
    {
        if (errno == EINTR) //interrupted by a signal: reissue the call
            continue;
        if (errno == EAGAIN) //non-blocking read and no data available right now:
            break;           //  resubmit later instead of busy-looping
        perror("read");
        break;
    }
    len -= ret; //handle partial reads: account for what was read
    buf += ret;
}
//writes up to count bytes starting at buf at the current file pos;
//  for files that do not support seeking (character devices), writes always start from the 'head'
//returns the num of bytes written
ssize_t write (int fd, const void *buf, size_t count);
/*
writes to regular files are guaranteed not to be partial;
    for other files, e.g. sockets, partial writes may happen
*/
ssize_t ret;
while (len != 0 && (ret = write(fd, buf, len)) != 0)
{
    if (ret == -1)
    {
        if (errno == EINTR)
            continue;
        perror("write");
        break;
    }
    len -= ret;
    buf += ret;
}
//ensure all dirty data and metadata (creation timestamp, attributes)
//  associated with the file are written back to disk; the call does
//  not return until the disk confirms the data has arrived
int fsync (int fd);//writes back the data and the inode's metadata, which may involve an expensive seek
//only flushes the data and the metadata essential to accessing the file later (e.g. file size),
//  which is potentially faster; nonessential metadata (e.g. modification timestamp)
//  is not guaranteed to be synchronized
int fdatasync (int fd);
//all buffers—both data and metadata—are guaranteed to reside on disk
void sync (void);
//setting sync flag
//O_DSYNC: sync only normal data  O_RSYNC
fd = open (file, O_WRONLY | O_SYNC);
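/*
    a minimal sketch of the synchronized-I/O pattern above: write a record and
    fsync() it before treating it as durable; "journal.dat" is only an example name
*/
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    int fd = open("journal.dat", O_WRONLY | O_CREAT | O_APPEND, 0644);
    if (fd == -1) {
        perror("open");
        return 1;
    }

    const char *rec = "critical record\n";
    if (write(fd, rec, strlen(rec)) == -1)
        perror("write");

    /* force the data and metadata for this file out of the kernel buffers and onto disk */
    if (fsync(fd) == -1)
        perror("fsync");

    close(fd);
    return 0;
}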
//unmaps the fd, invalidates it and returns it to the OS; closing a file says nothing about
//  the file being flushed to disk, sync ensures the file is committed to disk
//when the last open fd referring to a file is closed, the data structure representing the file
//  inside the kernel is freed, and the in-memory copy of the file's inode is unpinned; if nothing else
//  is pinning the inode, the inode may be freed from memory too
//if a file was unlinked from disk while still held open, it is not physically
//  removed until the file is closed and its inode removed from memory; therefore, close() can result in an
//  unlinked file finally being physically removed from disk
int close (int fd);
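/*
    a small sketch of the unlink-while-open behavior noted above: the classic private
    scratch-file pattern; "scratch.tmp" is only an example name
*/
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    int fd = open("scratch.tmp", O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd == -1) {
        perror("open");
        return 1;
    }

    /* unlink immediately: the name disappears, but the data stays reachable
       through fd until close(), when the inode is finally freed */
    if (unlink("scratch.tmp") == -1)
        perror("unlink");

    /* ... use fd as a private temporary file ... */

    close(fd); /* the storage is reclaimed here */
    return 0;
}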
//set the file pos of a fd to a given value and jump around in a file
/*
    origin - where the new position is measured from:
    SEEK_CUR, cur file pos + pos
    SEEK_END, end file pos + pos
    SEEK_SET, pos (absolute pos)
*/
//ret the new file pos
off_t lseek (int fd, off_t pos, int origin);

//get current file pos
off_t pos;
pos = lseek (fd, 0, SEEK_CUR);

//set file pos to the end of file
off_t ret;
ret = lseek (fd, 0, SEEK_END);
if (ret == (off_t) -1)
    return -1;

//set with a absolute file pos
off_t ret;
ret = lseek(fd, (off_t)1825, SEEK_SET);
if (ret == (off_t) -1)
    return -1;

//seeking past the end of the file
//  -a read request to the newly created file pos will ret EOF
//  -write request to this pos, new space will be created between old length and new length, 
//      and padded with zeros
//  -the zero padding makes a hole; a hole on a Unix-style filesystem does not occupy any physical
//      disk space (which implies the total size of all files can add up to more than the physical
//      disk size)
//  -files with holes are called sparse files; sparse files can enhance performance since manipulating
//      holes does not initiate physical I/O
//
int ret;
//size of off_t is often the C long type, which is generally the word size (the size of the
//  machine's general-purpose registers); 32-bit machines may encounter EOVERFLOW errors
ret = lseek (fd, (off_t) 1688, SEEK_END);
if (ret == (off_t) -1)
    return -1;
//unlike read() and write(), pread() and pwrite() do not update the file position
//  upon completion; they completely ignore the current file position
//they let the caller do tricky operations such as moving through the file backward or
//  randomly, and they avoid potential races that occur when using lseek()
ssize_t pread (int fd, void *buf, size_t count, off_t pos);
ssize_t pwrite (int fd, const void *buf, size_t count, off_t pos);
//two sys calls for truncating the len of file (smaller or larger), file must be writable
int ftruncate (int fd, off_t len);
int truncate (const char *path, off_t len);

#include <sys/select.h>
#include <sys/time.h>
struct timeval
{
    long tv_sec;  /* seconds */
    long tv_usec; /* microseconds */
};
//a call to select() blocks until the given fds are ready for I/O or until a timeout elapses
/*
    n: value of highest-valued fd in any set plus one
    timeout: upper bound on how long select() blocks before returning
*/
/*
    three sets to watch (set could be NULL)
        readfds: watched to see if data is available for reading
        writefds: ... writing
        exceptfds: ... if exception occurred, or if out-of-band data available (only for sockets)
*/
int select(int n,
           fd_set *readfds,
           fd_set *writefds,
           fd_set *exceptfds,
           struct timeval *timeout);
//manage the fd sets
FD_CLR(int fd, fd_set *set);//remove a fd from the set
FD_ISSET(int fd, fd_set *set);//test whether a fd is part of the set
FD_SET(int fd, fd_set *set);//add a fd to the set
FD_ZERO(fd_set *set); //remove all fd from the set
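/*
    a minimal sketch of the multiplexed-I/O workflow described earlier: watch stdin
    with select() and a 5-second timeout (the timeout value is arbitrary)
*/
#include <sys/select.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    fd_set readfds;
    struct timeval tv;

    FD_ZERO(&readfds);
    FD_SET(STDIN_FILENO, &readfds);

    tv.tv_sec = 5;   /* arbitrary timeout */
    tv.tv_usec = 0;

    int ret = select(STDIN_FILENO + 1, &readfds, NULL, NULL, &tv);
    if (ret == -1)
        perror("select");
    else if (ret == 0)
        printf("timeout, nothing to read\n");
    else if (FD_ISSET(STDIN_FILENO, &readfds))
        printf("stdin is readable\n");

    return 0;
}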

//apply select() to sleep for subsecond-resolution time
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = 500;
/* sleep for 500 microseconds */
select(0, NULL, NULL, NULL, &tv);

//System V’s multiplexed I/O solution
#include <poll.h>
struct pollfd
{
    int fd;        /* file descriptor */
    short events;  /* requested events to watch */
    short revents; /* returned events witnessed */
};
/*
  fds: each pollfd structure specifies a single fd to watch
  events: POLLIN, POLLOUT,...
*/
int poll (struct pollfd *fds, nfds_t nfds, int timeout);

struct pollfd fds[2];
int ret;
/* watch stdin for input */
fds[0].fd = STDIN_FILENO;
fds[0].events = POLLIN;
/* watch stdout for ability to write (almost always true) */
fds[1].fd = STDOUT_FILENO;
fds[1].events = POLLOUT;
/* All set, block! */
ret = poll(fds, 2, TIMEOUT * 1000);
if (ret == -1)
{
    perror("poll");
    return 1;
}

739. Daily Temperatures

func WarmerDay(temp []int) (ans []int) {
	var stack []int
	ans = make([]int, len(temp))
	//iterate over all temps in reverse
	for i := len(temp) - 1; i >= 0; i-- {
		//pop history records that are not warmer
		for len(stack) > 0 && temp[stack[len(stack)-1]] <= temp[i] {
			stack = stack[:len(stack)-1]
		}
		if len(stack) == 0 {
			ans[i] = 0
		} else {
			//how many days until a warmer temperature
			ans[i] = stack[len(stack)-1] - i
		}
		stack = append(stack, i) //for the next round of comparison
	}
	return
}

//output [1 1 4 2 1 1 0 0]
func WarmerDayTest() {
	temp := []int{73, 74, 75, 71, 69, 72, 76, 73}
	ans := WarmerDay(temp)
	fmt.Println(ans)
}

sliding window

//209. Minimum Size Subarray Sum
func minSubArrayLen(sum int, nums []int) (ans int) {
	var start, end, tmpSum int
	ans = math.MaxInt32
	for start < len(nums) {
		//move 'end' to make slide window valid
		for end < len(nums) && tmpSum < sum {
			tmpSum += nums[end]
			end++
		}
		if tmpSum < sum {break} //not able to find a valid sliding window
		ans = Min(ans, end - start)
		//move slide window by increase 'start'
		tmpSum -= nums[start]
		start++
	}

	if ans == math.MaxInt32 {ans = 0}
	return
}

bst

//230. Kth Smallest Element in a BST
func kthSmallest(root *TreeNode, k int) int {
	if root == nil {return -1}
	
	var stack []*TreeNode
	curr := root

	for true {
		for curr != nil {
			stack = append(stack, curr)
			curr = curr.Left
		}

		if len(stack) > 0 {
			curr = stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			k--
			if k == 0 {return int(curr.Val)}
		}

		curr = curr.Right
		if curr == nil && len(stack) == 0 {break}
	}
	return -1
}

3rd round 3

//34. Search for a Range
vector<int> searchRange(vector<int>& nums, int target) {
	vector<int> ans(2, -1);
	if (nums.size() == 0 || target < *nums.begin() || target > nums.back()) {
		return ans;
	}
	ans[0] = lower_bound(nums.begin(), nums.end(), target) - nums.begin(); //first pos with value >= target
	ans[1] = upper_bound(nums.begin(), nums.end(), target) - nums.begin() - 1; //last pos with value == target
	if (ans[0] > ans[1]) { //target lies within the value range but is not present
		ans[0] = ans[1] = -1;
	}
	return ans;
}

void SearchRange() {
	vector<int> nums{5, 7, 7, 8, 8, 10};
	auto ans = searchRange(nums, 3);
	cout << "SearchRange" << endl;
	cout << ans[0] << " " << ans[1] << endl;
}

golang list kit

package kit

import (
	"fmt"
	"math"
)

type Val_t	int

type ListNode struct {
	Val		 Val_t
	Next    *ListNode
}

type List struct {
	Head	*ListNode
	Tail	*ListNode
	Len		int
}

func NewList() (lst *List) {
	lst = &List{}
	return
}

func (lst *List) Ints2Lst(nums []Val_t) {
	if len(nums) <= 0 {return}
	
	dummy := &ListNode{Val:math.MaxInt32}
	prevNode := dummy
	lst.Len = 0
	for _, n := range nums {
		newNode := &ListNode{Val:n}
		prevNode.Next = newNode
		lst.Len++
		prevNode = newNode
	}

	if lst.Len > 0 {
		lst.Head = dummy.Next
		lst.Tail = prevNode	
	}
}

func (lst *List) Print() {
	curr := lst.Head
	if lst.Len > 0 {
		fmt.Println("[len]", lst.Len, "[head]", lst.Head.Val, 
			"[tail]", lst.Tail.Val)
	} else {
		fmt.Println("len", lst.Len)
		return
	}
	
	for curr != nil {
		fmt.Printf("%v", curr.Val)
		if curr != lst.Tail {
			fmt.Printf("->")
		} else {
			fmt.Println()
		}
		curr = curr.Next
	}
}

func (lst *List) GetAt(idx int) *ListNode {
	if idx < 0 || idx >= lst.Len {panic("index out of boundary")}
	curr := lst.Head
	for idx > 0 {
		curr = curr.Next
		idx--
	}
	return curr
} 

func (lst *List) PushBack(item Val_t) {
	newNode := &ListNode{Val:item}
	if lst.Len <= 0 {
		lst.Head, lst.Tail = newNode, newNode
	} else {
		lst.Tail.Next = newNode
		lst.Tail = newNode
	}
	lst.Len++
}

func (lst *List) PushFront(item Val_t) {
	newNode := &ListNode{Val:item}
	if lst.Len <= 0 {
		lst.Head, lst.Tail = newNode, newNode
	} else {
		newNode.Next = lst.Head
		lst.Head = newNode
	}
	lst.Len++
}

func (lst *List) InsertAt(idx int, item Val_t) {
	if idx < 0 || idx > lst.Len {panic("index out of boundary")}

	newNode := &ListNode{Val:item}

	//insert the new node at position idx (before the current idx-th node)
	dummy := &ListNode{Val:math.MaxInt32}
	dummy.Next = lst.Head
	prev, next := dummy, dummy.Next
	for idx > 0 {
		prev = next
		next = next.Next
		idx--
	}
	prev.Next = newNode
	newNode.Next = next

	//update the reference of head and tail
	lst.Len++
	lst.Head = dummy.Next
	curr := lst.Head
	for curr.Next != nil {curr = curr.Next}
	lst.Tail = curr
}

func (lst *List) PopBack() (ret Val_t) {
	if lst.Len < 1 {panic("no item to pop")}
	ret = lst.Tail.Val
	if lst.Len == 1 {
		lst.Head, lst.Tail = nil, nil
	} else {
		prevTail := lst.Head
		for prevTail.Next != lst.Tail {prevTail = prevTail.Next}
		lst.Tail = prevTail
	}
	lst.Len--
	return 
}

func (lst *List) PopFront() (ret Val_t){
	if lst.Len < 1 {panic("no item to pop")}
	ret = lst.Head.Val
	if lst.Len == 1 {
		lst.Head, lst.Tail = nil, nil
	} else {
		lst.Head = lst.Head.Next
	}
	lst.Len--
	return
}

func (lst *List) RemoveAt(idx int) (ret Val_t) {
	if idx < 0 || idx >= lst.Len {panic("index out of boundary")}
	curr := lst.GetAt(idx)
	ret = curr.Val
	if idx == 0 {
		return lst.PopFront()
	} else if idx == lst.Len - 1 {
		return lst.PopBack()
	} else {
		prev, next := lst.GetAt(idx - 1), lst.GetAt(idx + 1)
		prev.Next = next
		lst.Len--
	}
	return
}

func reverse(head *ListNode) *ListNode {
	var prev, curr *ListNode = nil, head

	for curr != nil {
		next := curr.Next
		curr.Next = prev
		prev = curr
		curr = next
	}

	return prev
}

func (lst *List) Reverse(from, to int) {
	if from < 0 || from >= lst.Len || to < 0 ||
		to >= lst.Len || from > to {panic("index out of boundary")}

	if from == to {return }
	var start, end, before, next *ListNode
	curr := lst.Head

	//find the four pointers needed before reversing
	for cnt := 0; cnt <= to; cnt++ {
		if cnt < from {before = curr}
		if cnt == from {start = curr}
		if cnt == to {end = curr}
		curr = curr.Next
	}
	next = end.Next 
	end.Next = nil //break the from-to list

	//re-link from-to list head
	if before != nil {
		before.Next = reverse(start)
	} else {
		lst.Head = reverse(start)
	}
	//re-link the reversed sublist to the remaining part
	start.Next = next
	lst.Tail = start

	//update list tail
	for lst.Tail.Next != nil {lst.Tail = lst.Tail.Next}
}

func LstTest() {
	nums := []Val_t{3, 5, 2, 4, 6}
	lst := NewList()
	lst.Ints2Lst(nums)
	lst.Print()
	lst.Reverse(0, lst.Len-1)
	lst.Print()
	/*lst.PushBack(7)
	lst.PushFront(1)
	lst.Print()
	lst.InsertAt(4, 3)
	lst.Print()
	fmt.Printf("3rd item: %v\n", lst.GetAt(3))

	lst.PopBack()
	lst.PopFront()
	fmt.Printf("remove 3rd item: %v \n", lst.RemoveAt(3))
	lst.Print()*/
}

c++11 feature

//# C++11 new features
//1. long long and unsigned long long types to support 64-bit integers
//2. char16_t and char32_t types to support 16-bit and 32-bit character
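// a minimal, hedged illustration of the new integer and character types
// (the literal values below are arbitrary examples, not from the original notes)
long long big = 9223372036854775807LL;              // 64-bit signed literal, LL suffix
unsigned long long ubig = 18446744073709551615ULL;  // ULL suffix
char16_t c16 = u'A';                                // 16-bit character, u'' literal
char32_t c32 = U'A';                                // 32-bit character, U'' literal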
//3. brace-enclosed list (list-initialization)
int x = {5};
double y {2.75};
short quar[5] {4,5,2,76,1};
int * ar = new int [4] {2,4,6,7};
class Stump
{
  private:
    int roots;
    double weight;

  public:
    Stump(int r, double w) : roots(r), weight(w) {}
};
Stump s1(3, 15.6);    // old style
Stump s2{5, 43.4};    // C++11
Stump s3 = {4, 32.1}; // C++11
//advantages
/*  
(list-initialization provides a type-safety check)
Ordinary initialization allows conversions that may or 
may not make sense, while with list-initialization syntax the compiler 
disallows conversions that attempt to 'narrow' the type;
'widening' conversions are allowed.
*/
char c1 {1.57e27}; // double-to-char, compile-time error
char c2 = {459585821};// int-to-char,out of range, compile-time error

//4. add the initializer_list template class that can be used as a constructor argument;
// that is, if a class has such a constructor, the brace syntax can be used only 
// with that constructor.
vector<int> a1(10); // uninitialized vector with 10 elements
vector<int> a2{10}; // initializer-list, a2 has 1 element set to 10
vector<int> a3{4,6,1}; // 3 elements set to 4,6,1

//initializer_list can be used like a container
#include <initializer_list>
double sum(std::initializer_list<double> il);
int main()
{
    double total = sum({2.5, 3.1, 4}); // 4 converted to 4.0
    ...
}
double sum(std::initializer_list<double> il)
{
    double tot = 0;
    for (auto p = il.begin(); p != il.end(); p++)
        tot += *p;
    return tot;
}

//5. 'auto' support automatic type deduction, provided that an explicit initializer is given
auto maton = 112; // maton is type int
auto pt = &maton; // pt is type int *
double fm(double, int);
auto pf = fm; // pf is type double (*)(double,int)
//'auto' is especially useful for template declaration 

//6. 'decltype' supports creating a variable of the type indicated by an expression
double x;
int n;
decltype(x*n) q; // q same type as x*n, i.e., double
decltype(&x) pd; // pd same as &x, i.e., double *

int j = 3;
int &k = j;
const int &n = j;
decltype(n) i1; // i1 type const int &
decltype(j) i2; // i2 type int
decltype((j)) i3; // i3 type int & //take notes about this
decltype(k + 1) i4; // i4 type int
// also, it is particularly useful in template definitions, when the type 
// cannot be determined until a specific instantiation is made
template<typename T, typename U>
void ef(T t, U u)
{
    decltype(t * u) tu; //the specific type is not known until T and U are fixed
    ...
}

//7. Trailing Return Type for declaring a function
double f1(double, int); // traditional syntax
auto f2(double, int) -> double; // new syntax, return type is double
//the new syntax looks like a step backward in readability, but it makes it
// possible to use decltype to specify a template function's return type when
// the parameters t and u are not yet in scope before the parameter list
template<typename T, typename U>
auto eff(T t, U u) -> decltype(t*u)
{
    ...
}

//8. Template Aliases: 'using ='
typedef std::vector<std::string>::iterator itType; //old way
using itType = std::vector<std::string>::iterator; //c++11 way
//this syntax are useful for partial template specializations
template<typename T>
using arr12 = std::array<T,12>; // template for multiple aliases

//9. nullptr to replace NULL (better type safety)
//The null pointer is a pointer guaranteed not to point to valid data;
// nullptr has its own type (std::nullptr_t), whereas NULL is typically
// just 0 and its exact definition depends on the implementation
//for backward compatibility, nullptr == 0 evaluates to true
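// a short hedged example of the type-safety difference (g() is a hypothetical overload set):
void g(int);
void g(char *);
g(nullptr);   // unambiguously calls g(char *)
g(NULL);      // resolution depends on how NULL is defined (often picks g(int))
int *p = nullptr;   // fine: nullptr converts to any pointer type
// int n = nullptr; // error: nullptr does not convert to an integer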

//10. smart pointer
// introduce unique_ptr, shared_ptr, and weak_ptr when old c++
// have only auto_ptr
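// a minimal sketch of the three C++11 smart pointers (assumes <memory>):
#include <memory>
std::unique_ptr<int> up(new int(42));        // sole owner; movable, not copyable
std::unique_ptr<int> up2 = std::move(up);    // ownership transferred, up is now empty
std::shared_ptr<int> sp1 = std::make_shared<int>(7); // reference-counted ownership
std::shared_ptr<int> sp2 = sp1;              // use count becomes 2
std::weak_ptr<int> wp = sp1;                 // observes without owning, no count bump
if (auto locked = wp.lock())                 // promote to shared_ptr if still alive
    *locked += 1;
// the managed object is deleted automatically when the last owner goes away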

//11. new Exception Specification
void f501(int) throw(bad_dog); // can throw type bad_dog exception
void f733(long long) throw(); // doesn't throw an exception
void f875(short, short) noexcept; // doesn't throw an exception

//12. Scoped Enumerations
enum Old1 {yes, no, maybe}; // traditional form
enum class New1 {never, sometimes, often, always}; // new form
enum struct New2 {never, lever, sever}; // new form
//you need to use New1::never and New2::never to identify those particular enumerators,
// therefore name conflicts can be avoided

//13. explicit to suppress automatic conversions
// invoked by one-argument constructors
class Plebe
{
    Plebe(int);             // automatic int-to-plebe conversion
    explicit Plebe(double); // requires explicit use
    ...
};
... Plebe a, b;
a = 5;          // implicit conversion, call Plebe(5)
b = 0.5;        // not allowed
b = Plebe(0.5); // allowed, but only as an explicit conversion

//14. Member In-Class Initialization
class Session
{
    int mem1 = 10;        // in-class initialization
    double mem2{1966.54}; // in-class initialization
    short mem3;

  public:
    Session() {}                                                    // #1
    Session(short s) : mem3(s) {}                                   // #2
    Session(int n, double d, short s) : mem1(n), mem2(d), mem3(s){} // #3
                                        ...
};
//in-class initialization avoids duplicated code in constructors

//15. Range-based for Loop
double prices[5] = {4.99, 10.99, 6.87, 7.99, 8.49};
for (auto x : prices)
    std::cout << x << std::endl;
for (auto &x : prices) //loop modifier
    std::cout << x << std::endl;

//16. New STL Containers
// introduce forward_list, unordered_map, unordered_multimap, unordered_set,
//   unordered_multiset, and array to the STL collections
//'forward_list' is a singly linked list that can be traversed in just one direction;
//  it is simpler and more economical in space compared with 'list'
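// a brief hedged sketch of the new containers (values are illustrative):
#include <array>
#include <forward_list>
#include <string>
#include <unordered_map>
#include <unordered_set>
std::array<int, 3> fixed = {1, 2, 3};                    // fixed-size array, no heap allocation
std::forward_list<int> fl = {4, 5, 6};                   // singly linked, forward traversal only
std::unordered_map<std::string, int> age{{"bob", 30}};   // hash map, average O(1) lookup
std::unordered_set<int> seen = {1, 2, 3};                // hash set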

//17. New STL Methods
//cbegin() and cend() return const iterators
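// small example of the const iterator accessors (v is a hypothetical vector):
#include <iostream>
#include <vector>
std::vector<int> v = {1, 2, 3};
for (auto it = v.cbegin(); it != v.cend(); ++it)
    std::cout << *it << ' ';   // cbegin()/cend() yield const_iterator, so *it is read-only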

//18. Angle Brackets
std::vector<std::list<int> > vl; // space between > > required pre-C++11
std::vector<std::list<int>> vl; // >> ok in C++11

//19. rvalue Reference
//  an rvalue is a value to which one cannot apply the address operator;
//  binding an rvalue to an rvalue reference results in the value being stored
//  in a location whose address can be taken
int x = 10;
int y = 23;
int && r1 = 13;
int && r2 = x + y;
double && r3 = std::sqrt(2.0);
//the main reason for rvalue reference is to implement
//  move semantics
//move constructor
Useless::Useless(Useless &&f) : n(f.n)
{
    ++ct;
    cout << "move constructor called; number of objects: " << ct << endl;
    pc = f.pc;      // steal address
    f.pc = nullptr; // give old object nothing in return
    f.n = 0;
    ShowObject();
}
Useless &Useless::operator=(Useless &&f) // move assignment
{
    if (this == &f)
        return *this;
    delete[] pc; //delete the origin data
    n = f.n;
    pc = f.pc; //steal data from rvalue
    f.n = 0;
    f.pc = nullptr; 
    return *this;
}

Useless two = one; //calls copy constructor
Useless four (one + three); // calls operator+(), move constructor
//if the class of one doesn't define a move constructor, the copy
//  constructor is called instead
four = std::move(one); // forces move assignment from an lvalue

//20. default/delete method
//explicitly declare the defaulted versions of these methods:
Someclass(const Someclass &) = default;
Someclass & operator=(const Someclass &) = default;
//disable copying, may only allow move
Someclass(const Someclass &) = delete;
Someclass & operator=(const Someclass &) = delete;
//make any call to redo(int) a compile-time error
class Someclass
{
public:
...
void redo(double);
void redo(int) = delete;
...
};

//21. Delegating Constructors
//code reduce
class Notes
{
    int k;
    double x;
    std::string st;

  public:
    Notes();
    Notes(int);
    Notes(int, double);
    Notes(int, double, std::string);
};
Notes::Notes(int kk, double xx, std::string stt) : k(kk),
                                                   x(xx), st(stt)
{ /*do stuff*/
}
Notes::Notes() : Notes(0, 0.01, "Oh")
{ /* do other stuff*/
}
Notes::Notes(int kk) : Notes(kk, 0.01, "Ah")
{ /* do yet other stuff*/
}
Notes::Notes(int kk, double xx) : Notes(kk, xx, "Uh")
{ /* ditto*/
}

//22. override and final for virtual methods
//the virtual specifier override indicates that you intend
//  to override a virtual function; if the declaration does not
//  match a base method, the compiler reports an error
virtual void f(char *ch) const override { 
    std::cout << val()<< ch << "!\n"; 
}
//final prohibits derived classes from overriding a particular virtual method
virtual void f(char ch) const final { 
    std::cout << val() << ch << "\n"; 
}

//23. Lambda function
[&count](int x){count += (x % 13 == 0);}

//old way to count numbers divisible by 3
std::vector<int> numbers(1000);
std::generate(numbers.begin(), numbers.end(), std::rand);

bool f3(int x) {return x % 3 == 0;}

int count3 = std::count_if(numbers.begin(), numbers.end(), f3);

//c++11 lambda way
count3 = std::count_if(numbers.begin(), numbers.end(),
    [](int x){return x % 3 == 0;});
//the definition is located at the point of use
//can be compiled as an inline function
//can capture automatic variables (by value or by reference) in scope
auto mod3 = [](int x){return x % 3 == 0;}; // mod3 is a name for the lambda
count1 = std::count_if(n1.begin(), n1.end(), mod3);
count2 = std::count_if(n2.begin(), n2.end(), mod3);

//capture variable by reference
int count13 = 0;
std::for_each(numbers.begin(), numbers.end(),
              [&count13](int x) { count13 += x % 13 == 0; });

//capture all variables by reference
int count3 = 0;
int count13 = 0;
std::for_each(numbers.begin(), numbers.end(),
              [&](int x) {count3 += x % 3 == 0; count13 += x % 13 == 0; });


//24. Variadic templates
//pack the args
template <typename... Args>   // Args is a template parameter pack
void show_list1(Args... args) // args is a function parameter pack
{
    ...
}
//unpack the args
void show_list3() {} //base case: ends the recursion when the pack is empty
template <typename T, typename... Args>
void show_list3(T value, Args... args) //recursively peel off the first argument
{
    std::cout << value << ", ";
    show_list3(args...);
}

golang framework

package main

import (
    "fmt"
	"time"
	"sync"
	"math/rand"
)

type routineFunc func(n int) bool

var wg sync.WaitGroup

func main() {
	rand.Seed(time.Now().UnixNano()) //seed the generator used by test()
	start := time.Now()

	funcs := []routineFunc{test, test}
	for i := range funcs {
		wg.Add(1) //add per call so a break cannot leave wg.Wait() hanging
		if !(funcs[i])(i) {
			fmt.Println("Fail at index", i)
			break
		}
	}

	wg.Wait()
	elapsed := time.Since(start)
	fmt.Println("Execution time:", elapsed.Nanoseconds()/1e6, "ms")
}

func test(n int) bool {
	defer wg.Done()
	amt := time.Duration(rand.Intn(250))
	fmt.Println("#", n, "wait ", amt.Nanoseconds(), "ms")
	time.Sleep(time.Millisecond * amt)
	return true
}

win thread

/*
advantages of using threads
1. threads reduce the OS overhead of creation and context switching 
    compared with processes
2. threads are tightly coupled, which makes it easy to share resources
3. threads improve performance by avoiding waits for processing or I/O
4. threads exploit multiprocessor performance through concurrency
 */

/*
 common issues and concerns when using threads
1. shared resources can lead to defects such as race conditions and
    deadlocks
2. how to design programs based on a sound understanding of the program
*/

/*
 basic understanding of thread
1. each thread has its own stack
2. the argument (a pointer) passed to a thread at creation time arrives
    as a parameter on the new thread's stack
3. a thread can allocate TLS (thread local storage), which provides a small
    per-thread array of pointer-sized slots. A TLS slot can only be accessed by
    the owning thread, which ensures threads don't modify one another's data
*/
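
/* a hedged sketch of per-thread storage with the TLS API (Worker and the
   error handling are illustrative, not from the original notes): */
#include <windows.h>

DWORD tlsIndex;                               // one slot index, shared by all threads

DWORD WINAPI Worker(LPVOID arg)
{
    TlsSetValue(tlsIndex, arg);               // each thread stores its own pointer in the slot
    void *mine = TlsGetValue(tlsIndex);       // later reads see only this thread's value
    return mine != NULL ? 0 : 1;
}

/* in the creating code:
     tlsIndex = TlsAlloc();                   // reserve the slot before starting threads
     ... create the worker threads, passing each its own argument ...
     TlsFree(tlsIndex);                       // release the slot after all threads finish */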

/*
CreateThread
    .start address, 
    .stack size (1 MB by default); one page (4 KB) is committed to
    the stack initially, and more pages are committed as needed until the limit is reached
    .pointer to thread argument
    .return thread's ID and handle
CreateRemoteThread: create a thread in another process

ExitThread
    .a terminated thread continues to exist until the last handle to it is 
    closed with CloseHandle()

GetExitCodeThread
    .lpExitCode

SuspendThread ResumeThread
    .every thread has a suspend count, a thread can execute only if the count
    is 0

WaitForMultipleObjects
    .wait for another thread to terminate

TlsAlloc
TlsFree
TlsGetValue(idx)
TlsSetValue(idx, void * val)
    Thread local storage can be used in a DLL to replace global storage;
    each calling thread then has its own copy of that storage (for a thread-safe DLL)

foreground thread (needs quick response) and background thread

SetThreadPriority()
GetThreadPriority()
*/
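
/* a hedged end-to-end sketch of the calls listed above (minimal error handling;
   Worker and the ids array are illustrative): */
#include <windows.h>

static DWORD WINAPI Worker(LPVOID arg)        // thread start address
{
    int id = *(int *)arg;                     // the pointed-to data must stay valid while the thread uses it
    return (DWORD)(id * 10);                  // becomes the thread exit code
}

int main()
{
    HANDLE threads[2];
    int ids[2] = {1, 2};
    for (int i = 0; i < 2; ++i)
        threads[i] = CreateThread(NULL, 0,    // default security, default 1 MB stack
                                  Worker, &ids[i], 0, NULL);

    WaitForMultipleObjects(2, threads, TRUE, INFINITE); // wait for all threads to terminate

    DWORD code;
    GetExitCodeThread(threads[0], &code);     // retrieve one worker's exit code
    for (int i = 0; i < 2; ++i)
        CloseHandle(threads[i]);              // thread object is freed once the last handle closes
    return 0;
}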

/*
C library thread safety
c lib was written to operate in single-threaded processes, some functions may
    use static storage to store intermediate results, therefore are not 
    thread-safe when two separate threads simultaneously access and modify that
    static storage
strtok : scan a string to find the next occurrence of a token
    is an example, which maintains persistent state between successive
    function calls

OS solves these problems by supplying thread-safe C lib implementation
    - use the special C functions _beginthreadex() and _endthreadex() instead of 
    CreateThread() and ExitThread() to remove the risk of unsafe library access 
    from threads
    - select Runtime Library, specify Multi-threaded DLL (/MD) 
*/
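
/* a hedged sketch of the thread-safe CRT entry points (compile with /MD;
   CrtWorker is illustrative): */
#include <windows.h>
#include <process.h>                          // _beginthreadex / _endthreadex

unsigned __stdcall CrtWorker(void *arg)       // note the CRT thread signature
{
    /* safe to call C runtime functions here */
    return 0;                                 // returning has the same effect as _endthreadex(0)
}

uintptr_t h = _beginthreadex(NULL, 0, CrtWorker, NULL, 0, NULL);
WaitForSingleObject((HANDLE)h, INFINITE);
CloseHandle((HANDLE)h);                       // unlike _beginthread, the handle is not closed automatically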

/*
threading model
    .boss\worker threading model
    .work crew model
    .client\server model and multistage pipeline, where work moves from one
        thread to the next

thread state
    .running, executing on a processor
    .waiting, blocked on a nonsignaled handle (thread, process, or
        synchronization object)

advantages of applying a threading model when designing a multi-threaded program
    .the models are well understood and tested, which avoids many common mistakes
    .a model helps developers obtain the best performance
    .models correspond naturally to the structure of the programming problem
    .troubleshooting is easier when analyzed in terms of a model; the underlying
        problem (race condition, deadlock) is seen as a violation of the basic
        principles of the model
*/

/*
race condition when thread 1 waits for thread 2 before thread 2 is created:
    .fix: create all worker threads in the suspended state and resume them only 
        after all worker threads have been created (see the sketch after this block)
    .the race condition is caused by unsafe assumptions about the progress
        of other threads

thread starvation and priority inversion
    .priority inversion: a low-priority thread holds a resource required by a
        high-priority thread
    .scheduling fairness ensures that all threads eventually run, which helps
        avoid both problems


divide and conquer is more than a strategy for algorithm design,
    it can also be key to exploiting multiprocessors
*/
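
/* a small hedged sketch of the "create suspended, resume later" fix; N, Worker,
   and args are hypothetical names: */
#include <windows.h>

HANDLE workers[N];
for (int i = 0; i < N; ++i)
    workers[i] = CreateThread(NULL, 0, Worker, &args[i],
                              CREATE_SUSPENDED, NULL);   // created, but not yet running
for (int i = 0; i < N; ++i)
    ResumeThread(workers[i]);                            // start them only after all exist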


sort

""" 853. Car Fleet """
def carFleet(target, pos, speed):
    #corner case handle
    if len(pos) != len(speed) or len(pos) == 0 :
        return 0

    cars = []
    #car position, time to reach target
    for i in range(len(pos)):
        cars.append((pos[i], 1.0*(target - pos[i])/speed[i]))
    
    #sort in reverse order
    cars.sort(key=lambda tup: tup[0], reverse=True)
    car_fleet = 0
    max_time = 0
    for _, t in cars:
        #max_time records the time for the previous car fleet; if the next car
        # needs less time to reach the target, it will be blocked and join
        # the previous car fleet
        if t > max_time:
            #a new car fleet forms if this car needs even more time to reach
            #   the final target
            max_time = t
            car_fleet += 1
    return car_fleet

3rd round 2

""" 392. Is Subsequence (string) pass """ 
def isSubsequence(s: str, t: str) -> bool:
    i, j = 0, 0
    while i < len(s) and j < len(t):
        if s[i] == t[j]:
            i += 1
        j += 1
    
    return i == len(s) #True if all characters in s have been found in t

search

//1014. Capacity To Ship Packages Within D Days
func shipWithinDays(weights []int, D int) int {
	maxEle := func (xs []int) (max int) {
		max = math.MinInt32
		for _, x := range xs {
			if x > max {max = x}
		}
		return
	}

	sumEle := func (xs []int) (sum int) {
		for _, x := range xs {
			sum += x
		}
		return
	}

	//the target daily capacity must be within [low, high]
	low := maxEle(weights)
	high := sumEle(weights)
	for low < high {
		mid := low + (high - low)/2
		days := 1 //days needed to ship everything
		trans := 0 //weight already loaded on the current day
		//try to ship out all weights with the selected daily
		//  capacity mid
		for _, w := range weights {
			if trans + w > mid {
				days++  //does not fit today, ship it the next day
				trans = w
			} else {
				trans += w //can be shipped today
			}
		}

		if days > D {
			low = mid + 1
		} else {
			high = mid - 1
		}
	}

	return low
}

3rd round

""" 379. Design Phone Directory """
class PhoneDirectory:
    def __init__(self, max_num):
        self.dir = dict()
        #allocate a directory in which every number is available (False = free)
        for i in range(max_num):
            self.dir[i] = False

    def get(self):
        #find the first num that is available
        for k, v in self.dir.items():
            if v == False:
                self.dir[k] = True
                return k
        return -1 #no available num

    def check(self, num):
        if num in self.dir and not self.dir[num]:
            #the number exists and is still available
            return True
        return False


    def release(self, num):
        #mark the number available again if it exists and is currently in use 
        if num in self.dir and self.dir[num]:
            self.dir[num] = False
