Reference (blog analysis of the per-CPU inode-number allocator in linux-3.10 fs/inode.c): http://blog.chinaunix.net/uid-20481436-id-1941493.html
linux-3.10/fs/inode.c 856 /* 857 * Each cpu owns a range of LAST_INO_BATCH numbers. 858 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations, 859 * to renew the exhausted range. 860 * 861 * This does not significantly increase overflow rate because every CPU can 862 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is 863 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the 864 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase 865 * overflow rate by 2x, which does not seem too significant. 866 * 867 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 868 * error if st_ino won't fit in target struct field. Use 32bit counter 869 * here to attempt to avoid that. 870 */ 871 #define LAST_INO_BATCH 1024 872 static DEFINE_PER_CPU(unsigned int, last_ino); 873 874 unsigned int get_next_ino(void) 875 { 876 unsigned int *p = &get_cpu_var(last_ino); 877 unsigned int res = *p; 878 879 #ifdef CONFIG_SMP 880 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { 881 static atomic_t shared_last_ino; 882 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino); 883 884 res = next - LAST_INO_BATCH; 885 } 886 #endif 887 888 *p = ++res; 889 put_cpu_var(last_ino); 890 return res; 891 } 892 EXPORT_SYMBOL(get_next_ino); |