List:       linux-ntfs-cvs
Subject:    [Linux-NTFS-cvs] CVS: ntfs-driver-tng/linux/fs/ntfs attrib.c,1.48,1.49
From:       Richard Russon <flatcap@users.sourceforge.net>
Date:       2002-01-27 7:25:09


Changes by: flatcap

Update of /cvsroot/linux-ntfs/ntfs-driver-tng/linux/fs/ntfs
In directory usw-pr-cvs1:/tmp/cvs-serv9220/linux/fs/ntfs

Modified Files:
	attrib.c 
Log Message:
more commenting & tweaks


Index: attrib.c
===================================================================
RCS file: /cvsroot/linux-ntfs/ntfs-driver-tng/linux/fs/ntfs/attrib.c,v
retrieving revision 1.48
retrieving revision 1.49
diff -U2 -r1.48 -r1.49
--- attrib.c	2002/01/26 03:36:01	1.48
+++ attrib.c	2002/01/27 07:25:07	1.49
@@ -22,18 +22,49 @@
 #include "ntfs.h"
 
-/* Temporary helper functions */
+/* Temporary helper functions -- might become macros */
 /**
- * rl_realloc (run_list *orig, int old, int new)
+ * rl_mm - run_list memmove
  */
-static inline run_list * rl_realloc (run_list *orig, int old, int new)
+static inline void rl_mm (run_list *base, int dst, int src, int size)
 {
-	run_list *nrl;
+	if ((dst != src) && (size > 0))
+		memmove (base + dst, base + src, size * sizeof (run_list));
+}
+
+/**
+ * rl_mc - run_list memory copy
+ */
+static inline void rl_mc (run_list *dstbase, int dst, run_list *srcbase,
+		int src, int size)
+{
+	if (size > 0)
+		memcpy (dstbase+dst, srcbase+src, size * sizeof (run_list));
+}
 
-	if (!orig)
-		return ERR_PTR(-EINVAL);
 
+/**
+ * ntfs_rl_realloc - Reallocate memory for run_lists
+ * @orig:  The original memory allocation
+ * @old:   The number of run_lists in the original
+ * @new:   The number of run_lists we need space for
+ *
+ * As the run_lists grow, more memory will be required.  To prevent the
+ * kernel having to allocate and reallocate large numbers of small bits of
+ * memory, this function returns an entire page of memory.
+ *
+ * N.B.  If the new allocation doesn't require a different number of pages in
+ *       memory, the function will return the original pointer.
+ *
+ * Return: Pointer, The newly allocated, or recycled, memory.
+ *
+ * Errors: -ENOMEM, Not enough memory to allocate run list array.
+ *         -EINVAL, Invalid parameters were passed in.
+ */
+static inline run_list * ntfs_rl_realloc (run_list *orig, int old, int new)
+{
+	run_list *nrl;
+
 	old = PAGE_ALIGN (old * sizeof (run_list));
 	new = PAGE_ALIGN (new * sizeof (run_list));
-
 	if (old == new)
 		return orig;
@@ -41,44 +72,34 @@
 	nrl = ntfs_malloc_nofs (new);
 	if (!nrl)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR (-ENOMEM);
 
-	memcpy (nrl, orig, min (old, new));
-	ntfs_free (orig);
+	if (orig) {
+		memcpy (nrl, orig, min (old, new));
+		ntfs_free (orig);
+	}
 	return nrl;
 }
 
 /**
- * rl_mm (run_list *base, int dst, int src, int size)
- */
-static inline void rl_mm (run_list *base, int dst, int src, int size)
-{
-	if ((dst != src) && (size > 0))
-		memmove (base+dst, base+src, size * sizeof (run_list));
-}
-
-/**
- * rl_mc (run_list *dstbase, int dst, run_list *srcbase, int src, int size)
+ * ntfs_rl_merge - Join together two run_lists
+ * @one:  The first run_list and destination
+ * @two:  The second run_list
+ *
+ * If possible merge together two run_lists.  For this, their VCNs and LCNs
+ * must be adjacent.
+ *
+ * Return: TRUE   Success, the run_lists were merged
+ *         FALSE  Failure, the run_lists were not merged
  */
-static inline void rl_mc (run_list *dstbase, int dst, run_list *srcbase, int src, int size)
+static inline BOOL ntfs_rl_merge (run_list *one, run_list *two)
 {
-	if (size > 0)
-		memcpy (dstbase+dst, srcbase+src, size * sizeof (run_list));
-}
+	BUG_ON (!one || !two);
 
-/**
- * rl_merge (run_list *one, run_list *two)
- */
-static inline BOOL rl_merge (run_list *one, run_list *two)
-{
-	if (!one || !two)
+	if ((one->lcn < 0) || (two->lcn < 0))     /* Are we merging holes? */
 		return FALSE;
-
-	if ((one->lcn < 0) || (two->lcn < 0))
-		return FALSE; /* trying to merge two holes */
-
-	if ((one->lcn + one->length) != two->lcn)
+	if ((one->lcn + one->length) != two->lcn) /* Are the runs contiguous? */
 		return FALSE;
-	if ((one->vcn + one->length) != two->vcn)
-		return FALSE; /* runs are misaligned */
+	if ((one->vcn + one->length) != two->vcn) /* Are the runs misaligned? */
+		return FALSE;
 
 	one->length += two->length;
@@ -86,5 +107,4 @@
 }
 
-
 /**
  * ntfs_rl_append - Append a run_list after the given element
@@ -105,6 +125,5 @@
  */
 static inline run_list * ntfs_rl_append (run_list *orig, int osize,
-					 run_list *new,  int nsize,
-					 int loc)
+					 run_list *new,  int nsize, int loc)
 {
 	run_list *res;
@@ -113,15 +132,11 @@
 	BUG_ON (!orig || !new);
 
-	right = rl_merge (new + nsize - 1, orig + loc + 1);
+	right = ntfs_rl_merge (new + nsize - 1, orig + loc + 1);
 
-	res = rl_realloc (orig, osize, osize + nsize - right);
+	res = ntfs_rl_realloc (orig, osize, osize + nsize - right);
 	if (IS_ERR (res))
 		return res;
 
-	if (nsize != right)
-		rl_mm (res, loc + 1 + nsize,
-			    loc + 1 + right,
-			    osize - loc - 1 - right);
-
+	rl_mm (res, loc + 1 + nsize, loc + 1 + right, osize - loc - 1 - right);
 	rl_mc (res, loc + 1, new, 0, nsize);
 
@@ -148,7 +163,6 @@
  *         -EINVAL, Invalid parameters were passed in.
  */
-run_list * ntfs_rl_insert (run_list *orig, int osize,
-			   run_list *new,  int nsize,
-			   int loc)
+static inline run_list * ntfs_rl_insert (run_list *orig, int osize,
+					 run_list *new,  int nsize, int loc)
 {
 	run_list *res;
@@ -160,5 +174,5 @@
 
 	if (loc > 0) {
-		left = rl_merge (orig + loc - 1, new);
+		left = ntfs_rl_merge (orig + loc - 1, new);
 
 		disc = (new[0].vcn > (orig[loc-1].vcn + orig[loc-1].length));
@@ -169,19 +183,17 @@
 	}
 
-	res = rl_realloc (orig, osize, osize + nsize - left + disc - hole);
+	res = ntfs_rl_realloc (orig, osize, osize + nsize - left + disc - hole);
 	if (IS_ERR (res))
 		return res;
-
-	if ((nsize - left + disc - hole) != 0)
-		rl_mm (res, loc + nsize - left + disc - hole,
-			    loc,
-			    osize - loc);
 
-	if ((nsize - left) > 0)
-		rl_mc (res, loc + disc - hole, new, left, nsize - left);
+	rl_mm (res, loc + nsize - left + disc - hole, loc, osize - loc);
+	rl_mc (res, loc + disc - hole, new, left, nsize - left);
 
 	if (res[loc+nsize-left+disc-hole].lcn >= LCN_HOLE) {
-		res[loc+nsize-left+disc-hole].vcn = res[loc+nsize-left+disc-hole-1].vcn + res[loc+nsize-left+disc-hole-1].length;
-		res[loc+nsize-left+disc-hole].length -= (new[nsize-1].vcn + new[nsize-1].length - new[0].vcn);
+		res[loc+nsize-left+disc-hole].vcn =
+			res[loc+nsize-left+disc-hole-1].vcn +
+			res[loc+nsize-left+disc-hole-1].length;
+		res[loc+nsize-left+disc-hole].length -= (new[nsize-1].vcn +
+			new[nsize-1].length - new[0].vcn);
 	}
 
@@ -191,5 +203,6 @@
 		} else {
 			if (loc > 0) {
-				res[loc].vcn = res[loc-1].vcn + res[loc-1].length;
+				res[loc].vcn = res[loc-1].vcn +
+					res[loc-1].length;
 				res[loc].length = res[loc+1].vcn - res[loc].vcn;
 			} else {
@@ -220,7 +233,6 @@
  *         -EINVAL, Invalid parameters were passed in.
  */
-run_list * ntfs_rl_replace (run_list *orig, int osize,
-			    run_list *new,  int nsize,
-			    int loc)
+static inline run_list * ntfs_rl_replace (run_list *orig, int osize,
+					  run_list *new, int nsize, int loc)
 {
 	run_list *res;
@@ -230,19 +242,17 @@
 	BUG_ON (!orig || !new);
 
-	right = rl_merge (new + nsize - 1, orig + loc + 1);
+	right = ntfs_rl_merge (new + nsize - 1, orig + loc + 1);
 	if (loc > 0)
-		left = rl_merge (orig + loc - 1, new);
+		left = ntfs_rl_merge (orig + loc - 1, new);
 
-	res = rl_realloc (orig, osize, osize + nsize - left - right);
+	res = ntfs_rl_realloc (orig, osize, osize + nsize - left - right);
 	if (IS_ERR (res))
 		return res;
 
-	if ((nsize - left) != (right + 1))
-		rl_mm (res, loc + nsize - left,
-			    loc + right + 1,
-			    osize - loc - right - 1);
+	rl_mm (res, loc + nsize - left,
+		    loc + right + 1,
+		    osize - loc - right - 1);
 
-	if ((nsize - left) > 0)
-		rl_mc (res, loc, new, left, nsize - left);
+	rl_mc (res, loc, new, left, nsize - left);
 
 	return res;
@@ -258,5 +268,5 @@
  *
  * Split the run_list at @loc into two and insert @new.  No merging of
- * run_list's is necessary.  Adjust the size of the holes either side.
+ * run_lists is necessary.  Adjust the size of the holes either side.
  *
  * Return: Pointer, The new, combined, run_list
@@ -265,7 +275,6 @@
  *         -EINVAL, Invalid parameters were passed in.
  */
-run_list * ntfs_rl_split (run_list *orig, int osize,
-			  run_list *new,  int nsize,
-			  int loc)
+static inline run_list * ntfs_rl_split (run_list *orig, int osize,
+					run_list *new,  int nsize, int loc)
 {
 	run_list *res;
@@ -273,5 +282,5 @@
 	BUG_ON (!orig || !new);
 
-	res = rl_realloc (orig, osize, osize + nsize + 1);
+	res = ntfs_rl_realloc (orig, osize, osize + nsize + 1);
 	if (IS_ERR (res))
 		return res;
@@ -282,7 +291,19 @@
 	res[loc].length = res[loc+1].vcn - res[loc].vcn;
 	res[loc+nsize+1].vcn = res[loc+nsize].vcn + res[loc+nsize].length;
-	res[loc+nsize+1].length -= (new[nsize-1].vcn + new[nsize-1].length - new[0].vcn);
-	res[loc+nsize+1].length -= res[loc].length;
+	res[loc+nsize+1].length -= (new[nsize-1].vcn + new[nsize-1].length - new[0].vcn) + res[loc].length;
 
+#if 0
+	#define hole1 (*(res + loc))		// preceding hole
+	#define first (*(res + loc + 1))	// first inserted element
+	#define last  (*(res + nsize))		// last  inserted element
+	#define hole2 (*(res + nsize + 1))	// following hole
+	#define newf  (*(new + 0))		// first new element
+	#define newl  (*(new + nsize - 1))	// last  new element
+
+	hole1.length = first.vcn - hole1.vcn;
+	hole2.vcn = last.vcn + last.length;
+	hole2.length -= newl.vcn + newl.length - newf.vcn + hole1.length;
+#endif
+
 	return res;
 }
@@ -290,137 +311,33 @@
 
 /**
- * merge_run_lists - Merge two run_list's into one
+ * merge_run_lists - Merge two run_lists into one
  * @drl:  The original run_list.
  * @srl:  The new run_list to be merge into @drl.
- *
- * Sanity check the two run lists for their ability to be merged, and if all is
- * ok, merge @srl into @drl returning the resulting run list. Both @srl and
- * @drl are deallocated before returning the new run list. In some cases, @drl
- * might be returned and not deallocated (when @srl and @drl fit inside @drl).
- * And vice versa.
- *
- * If an error occurs, IS_ERR(result) will be true and PTR_ERR(result) will
- * give the error code. In that case both @srl and @drl are left untouched.
- *
- * If @srl == NULL, just return @drl. If @drl == NULL, create start element at
- * beginning, if not already present, and return the result. Usually the result
- * will fit into @srl itself, thus @srl will be returned in most such cases.
  *
- * If both @srl and @drl are not NULL, they need to be merged.
+ * First we sanity check the two run_lists to make sure that they are sensible
+ * and can be merged.  The @srl run_list must be either after the @drl run_list
+ * or completely within a hole in @drl.
  *
  * Merging of run lists is necessary in two cases:
- *	1. When attribute lists are used and a further extent is being mapped.
- *	2. When new clusters are allocated to fill a hole or extend a file.
+ *   1. When attribute lists are used and a further extent is being mapped.
+ *   2. When new clusters are allocated to fill a hole or extend a file.
+ *
+ * There are four possible ways @srl can be merged.  It can be inserted at
+ * the beginning of a hole, split the hole in two, be appended at the end
+ * of a hole, or replace the whole hole.  It can also be appended to the
+ * end of the run_list, which is just a variant of the insert case.
  *
- * In the first case, the idea is that ntfs_read_inode() only parses the run
- * list of the first attribute extent. When the user later tries to read data
- * from a position not covered by the first extent, the run list of the
- * corresponding extent will be read into memory and merged with the existing
- * mapped run list.
- *
- * In the second case, the idea is that the cluster allocator will be given a
- * starting vcn and a number of lcns to allocate and it will create a new run
- * list with the newly allocated cluster runs using the starting vcn in order
- * to fill-in the vcn parts of the run list elements correctly. This run list
- * fragment (for it has no start element) is then merged into the existing
- * mapped run list.
- *
- * This merge function is generic enough to cover both these cases but it is
- * not a generic merge function of arbitrary run lists. It is optimized for
- * the common cases occuring in the above two applications and it will only
- * cope when both @drl and @srl fulfill the following constraints:
- *
- * 1. @srl never contains unmapped elements inside it. It only has one
- * unmapped element on each end, the placeholder at the start and the
- * terminator at the end, the former being optional. For example, run lists
- * returned by the cluster allocator do not a start element, while run lists
- * returned by decompress_mapping_pairs() always contain both.
- *
- * 2. @srl and @drl never intercalate. I.e. while it is legal that @srl will
- * fall within an unmapped region of @drl, @srl will lie in that region in its
- * entirety. The only exception to this being that the unmapped region of @drl
- * might be an actual hole (or anything else with lcn < 0 for that matter), or
- * in the most complex case, there might be a succession of different run list
- * elements with different lcns (but all < 0).
- *
- * 3. All mergeable unmapped runs within both @srl and @drl will have been
- * merged before this function is called. Thus you cannot have two successive
- * elements with the same lcn value less than zero.
- *
- * Thus we do not allow (will detect as error) for both run lists to have
- * unmapped parts in their middle AND to have them be intercalated (cf.
- * constraints 1 and 2), i.e. the following scenario is NOT legal (A, B, C,
- * and D > 0):
- *
- *	vcn		@drl lcn	@srl lcn
- *	==========================================
- *	A		mapped		not mapped
- *	A+B		not mapped	mapped
- *	A+B+C		mapped		not mapped
- *	A+B+C+D		not mapped	mapped
- *
- * We do allow a run list to fit into the middle of the other as this makes
- * sense for mapping run lists of attribute extents on demand. So we allow
- * this scenario (A, B, and C > 0):
- *
- *	vcn		@drl lcn	@srl lcn
- *	==========================================
- *	A		mapped		not mapped
- *	A+B		not mapped	mapped
- *	A+B+C		mapped		not mapped
- *
- * And, of course, we allow the trivial case where the two run lists don't
- * actually overlap, i.e. when they look like this (A and B > 0):
- *
- *	vcn		@drl lcn	@srl lcn
- *	==========================================
- *	A		mapped		not mapped
- *	A+B		not mapped	mapped
- *
- * Implementation details
- * ======================
- *
- * There are three cases where the source run list needs to be inserted:
- *
- * 	1. In front of the destination one, covered by __front_merge_rl().
- *	2. Into the destination run list, covered by __insert_merge_rl().
- * 	3. After the destination one, covered by __back_merge_rl().
- *
- * While cases 1 and 3 can logically be folded into the same one by swapping
- * the two run lists, we do not do so, due to the variation in constraints
- * placed upon the two run lists (see above). TODO: We might still be safe to
- * do this anyway. Need to look back at this case once we have the completed
- * code as planned now. If it then makes sense to merge the two cases we can
- * do it then.
- *
- * We start by going through the two run lists getting to the first element
- * with lcn >= LCN_HOLE and we note the positions in @[sd]start.
- *
- * Starting at @dstart, scan the destination run list until we find the
- * location at which the source has to be inserted (case 2) or until we reach
- * the end of the destination run list (case 3). Case 1 is detected by the fact
- * that the run list element @sstart in the source run list has a lower vcn
- * than the run list element @dstart in the destination run list. Record
- * place of insertion in @dins.
- *
- * Proceed to scan to the end of both run lists in order to know their sizes
- * and record the positions in @[sd]end.
- *
- * Seek back to the last element with lcn >= LCN_HOLE and record positions in
- * @[sd]final.
- *
- * Check for obvious invalid overlaps between the run lists and abort if any
- * found.
- *
- * We now are in a position to A) know the size of the run lists and B) know
- * how they fit together, thus we can call the appropriate helper function to
- * perform the actual merge.
- *
- * The helper functions allocate enough memory for both run lists if the merge
- * cannot be performed in place and then perform the merge. We cannot allocate
- * the memory before calling the helper functions, because we don't know yet
- * whether or not the merge can be performed in place.
+ * N.B.  Either, or both, of the input pointers may be freed if the function
+ *       is successful.  Only the returned pointer may be used.
+ *
+ *       If the function fails, neither of the input run_lists may be safe to use.
+ *
+ * Return: Pointer, The resultant merged run_list.
+ *
+ * Errors: -ENOMEM, Not enough memory to allocate run list array.
+ *         -EINVAL, Invalid parameters were passed in.
+ *         -ERANGE, The run_lists overlap and cannot be merged.
  */
-run_list * merge_run_lists(run_list *drl, run_list *srl)
+run_list * merge_run_lists (run_list *drl, run_list *srl)
 {
 	run_list *nrl;		/* New run list. */
@@ -429,33 +346,33 @@
 	int dins;		/* Index into @drl at which to insert @srl. */
 	int dend, send;		/* Last index into @[ds]rl. */
-	int dfinal, sfinal;	/* The last index into @[ds]rl with lcn >= LCN_HOLE. */
+	int dfinal, sfinal;	/* The last index into @[ds]rl with
+				   lcn >= LCN_HOLE. */
 
 	ntfs_debug ("dst:\n");
-	ntfs_debug_dump_runlist(drl);
+	ntfs_debug_dump_runlist (drl);
 	ntfs_debug ("src:\n");
-	ntfs_debug_dump_runlist(srl);
+	ntfs_debug_dump_runlist (srl);
 
  	/* Check for silly calling... */
-	if (unlikely(!srl))
+	if (unlikely (!srl))
 		return drl;
-	if (unlikely(IS_ERR(srl) || IS_ERR(drl)))
-		return ERR_PTR(-EINVAL);
+	if (unlikely (IS_ERR (srl) || IS_ERR (drl)))
+		return ERR_PTR (-EINVAL);
 
 	/* Check for the case where the first mapping is being done now. */
-	if (unlikely(!drl)) {
+	if (unlikely (!drl)) {
 		nrl = srl;
 
 		/* Complete the source run list if necessary. */
-		if (unlikely(srl[0].vcn)) {
+		if (unlikely (srl[0].vcn)) {
 			/* Scan to the end of the source run list. */
-			for (send = 0; likely(srl[send].length); send++)
+			for (send = 0; likely (srl[send].length); send++)
 				;
-			nrl = rl_realloc (srl, send, send + 1);
+			nrl = ntfs_rl_realloc (srl, send, send + 1);
 			if (!nrl)
-				return ERR_PTR(-ENOMEM);
+				return ERR_PTR (-ENOMEM);
 
-			memmove(nrl + 1, nrl, send * sizeof (run_list));
-			/* Add start element. */
-			nrl[0].vcn = 0;
+			rl_mm (nrl, 1, 0, send);
+			nrl[0].vcn = 0;			/* Add start element. */
 			nrl[0].lcn = LCN_RL_NOT_MAPPED;
 			nrl[0].length = nrl[1].vcn;
@@ -473,7 +390,6 @@
 		di++;
 
-	/* Can't have entirely unmapped run lists. */
-	//BUG_ON(!srl[si].length || !drl[di].length);
-	BUG_ON(!srl[si].length);
+	/* Can't have an entirely unmapped srl run_list. */
+	BUG_ON (!srl[si].length);
 
 	/* Record the starting points. */
@@ -496,11 +412,12 @@
 	    (drl[di].lcn >= 0) &&
 	    (srl[si].lcn >= 0)) {
-		ntfs_error(NULL, "Run lists overlap. Cannot merge! Returning ERANGE.");
-		nrl = ERR_PTR(-ERANGE);
+		ntfs_error (NULL, "Run lists overlap. Cannot merge! Returning "
+				"ERANGE.");
+		nrl = ERR_PTR (-ERANGE);
 		goto exit;
 	}
 
 	/* Scan to the end of both run lists in order to know their sizes. */
-	for (send = si; likely(srl[send].length); send++)
+	for (send = si; srl[send].length; send++)
 		;
 	for (dend = di; drl[dend].length; dend++)
@@ -516,12 +433,12 @@
 	BOOL start;
 	BOOL finish;
-	int ds = dend - dstart + 1;	/* Number of elements in drl & srl */
+	int ds = dend   - dstart + 1;	/* Number of elements in drl & srl */
 	int ss = sfinal - sstart + 1;
 
 	start  = ((drl[dins].lcn == LCN_RL_NOT_MAPPED) ||    /* End of file   */
-		 (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
+		  (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
 	finish = ((drl[dins].lcn != LCN_RL_NOT_MAPPED) &&    /* End of file   */
-		 ((drl[dins].vcn + drl[dins].length)	     /* End of hole   */
-		  <= (srl[send-1].vcn + srl[send-1].length)));
+		 ((drl[dins].vcn + drl[dins].length) <=      /* End of hole   */
+		  (srl[send-1].vcn + srl[send-1].length)));
 
 	if (start)
@@ -537,18 +454,19 @@
 	}
 
-	if (likely(!IS_ERR(nrl))) {
+	if (likely (!IS_ERR (nrl))) {
 		/* The merge was completed successfully. */
 		if (nrl != drl)
-			ntfs_free(drl);
+			ntfs_free (drl);
 finished:
 		if (nrl != srl)
-			ntfs_free(srl);
-		/*ntfs_debug("Done.\n");*/
-		/*ntfs_debug("Merged run list:\n");*/
-		
+			ntfs_free (srl);
+		/*ntfs_debug ("Done.\n");*/
+		/*ntfs_debug ("Merged run list:\n");*/
+
 		ntfs_debug ("res:\n");
-		ntfs_debug_dump_runlist(nrl);
+		ntfs_debug_dump_runlist (nrl);
 	} else {
-		ntfs_error(NULL, "Merge failed, returning error code %i.", -PTR_ERR(nrl));
+		ntfs_error (NULL, "Merge failed, returning error code %i.",
+				-PTR_ERR (nrl));
 	}
 exit:
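
Below is a minimal, user-space sketch of the new helpers -- not part of the
commit.  The run_list layout is simplified and PAGE_ALIGN, ntfs_malloc_nofs
and ntfs_free are replaced with plain libc equivalents, but it illustrates
the page-recycling behaviour documented for ntfs_rl_realloc and the
memmove/memcpy wrappers rl_mm/rl_mc.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

typedef struct {		/* One run: @length clusters at @vcn/@lcn. */
	long long vcn;
	long long lcn;
	long long length;
} run_list;

/* Move @size runs within one array (overlap safe), like rl_mm. */
static void rl_mm(run_list *base, int dst, int src, int size)
{
	if (dst != src && size > 0)
		memmove(base + dst, base + src, size * sizeof(run_list));
}

/* Copy @size runs between two arrays, like rl_mc. */
static void rl_mc(run_list *dstbase, int dst, run_list *srcbase, int src,
		int size)
{
	if (size > 0)
		memcpy(dstbase + dst, srcbase + src, size * sizeof(run_list));
}

/*
 * Grow or shrink a run_list array in whole pages.  If the old and new
 * sizes round up to the same number of pages, the original pointer is
 * handed straight back -- the recycling described for ntfs_rl_realloc.
 */
static run_list *rl_realloc_sketch(run_list *orig, int old, int new)
{
	run_list *nrl;

	old = PAGE_ALIGN(old * sizeof(run_list));
	new = PAGE_ALIGN(new * sizeof(run_list));
	if (old == new)
		return orig;

	nrl = malloc(new);
	if (!nrl)
		return NULL;
	if (orig) {
		memcpy(nrl, orig, old < new ? old : new);
		free(orig);
	}
	return nrl;
}

int main(void)
{
	run_list *rl = rl_realloc_sketch(NULL, 0, 3);

	if (!rl)
		return 1;
	rl[0] = (run_list){ .vcn = 0, .lcn = 100, .length = 4 };
	rl[1] = (run_list){ .vcn = 4, .lcn = 200, .length = 2 };
	rl[2] = (run_list){ .vcn = 6, .lcn = 0,   .length = 0 };  /* end */

	/* Still one page, so this returns the same pointer. */
	rl = rl_realloc_sketch(rl, 3, 4);

	/* Open a gap at index 1 and copy run 0 into it. */
	rl_mm(rl, 2, 1, 2);
	rl_mc(rl, 1, rl, 0, 1);

	printf("run 1: vcn %lld lcn %lld len %lld\n",
			rl[1].vcn, rl[1].lcn, rl[1].length);
	free(rl);
	return 0;
}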

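The four merge cases described in the merge_run_lists comment can also be
pictured with a small, self-contained classifier.  This is only an
illustration of the decision, not the driver's logic; it assumes the new
run_list collapses to a single run lying entirely within a single hole.

#include <stdio.h>

/* Simplified run: @length clusters at virtual/logical cluster numbers
 * @vcn/@lcn.  An lcn < 0 marks a hole. */
struct run {
	long long vcn;
	long long lcn;
	long long length;
};

/* Where does @new land within @hole?  (Illustration only.) */
static const char *classify(const struct run *hole, const struct run *new)
{
	int at_start = (new->vcn == hole->vcn);
	int at_end   = (new->vcn + new->length == hole->vcn + hole->length);

	if (at_start && at_end)
		return "replace the whole hole";
	if (at_start)
		return "insert at the beginning of the hole";
	if (at_end)
		return "append at the end of the hole";
	return "split the hole in two";
}

int main(void)
{
	struct run hole = { .vcn = 10, .lcn = -1,  .length = 8 };
	struct run new  = { .vcn = 12, .lcn = 500, .length = 3 };

	printf("%s\n", classify(&hole, &new));	/* split the hole in two */
	return 0;
}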

_______________________________________________
Linux-NTFS-cvs mailing list
Linux-NTFS-cvs@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-ntfs-cvs

