符合中小企业对网站设计、功能常规化需求的企业展示型网站建设
本套餐主要针对企业品牌型网站、中高端设计、前端互动体验...
商城网站建设因基本功能的需求不同费用上面也有很大的差别...
手机微信网站开发、微信官网、微信商城网站...
所有节点:
十多年的仁寿网站建设经验,针对设计、前端、开发、售后、文案、推广等六对一服务,响应快,48小时及时工作处理。营销型网站的优势是能够根据用户设备显示端的尺寸不同,自动调整仁寿建站的显示方式,使网站能够适用不同显示终端,在浏览器中调整网站的宽度,无论在任何一种浏览器上浏览网站,都能展现优雅布局与设计,从而大程度地提升浏览体验。创新互联从事“仁寿网站设计”,“仁寿网站推广”以来,每个客户项目都认真落实执行。
# Build jsvc from source (required by Hadoop secure-mode DataNodes) and install it.
# cd ~/soft
# wget http://mirror.bit.edu.cn/apache/commons/daemon/source/commons-daemon-1.0.15-native-src.tar.gz
# tar zxf commons-daemon-1.0.15-native-src.tar.gz
# cd commons-daemon-1.0.15-native-src/unix; ./configure; make
# cp jsvc /usr/local/hadoop-2.4.0/libexec/
# Replace the bundled commons-daemon jar with the matching 1.0.15 version.
# cd ~/soft
# wget http://mirror.bit.edu.cn/apache//commons/daemon/binaries/commons-daemon-1.0.15-bin.tar.gz
# tar zxf commons-daemon-1.0.15-bin.tar.gz
# cp commons-daemon-1.0.15/commons-daemon-1.0.15.jar /usr/local/hadoop-2.4.0/share/hadoop/hdfs/lib/
# cp commons-daemon-1.0.15/commons-daemon-1.0.15.jar /usr/local/hadoop-2.4.0/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/
# rm -f /usr/local/hadoop-2.4.0/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar
# rm -f /usr/local/hadoop-2.4.0/share/hadoop/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/commons-daemon-1.0.13.jar
# Point Hadoop at the freshly built jsvc binary.
# vim /usr/local/hadoop-2.4.0/etc/hadoop/hadoop-env.sh
export JSVC_HOME=/usr/local/hadoop-2.4.0/libexec/
所有节点:
# Install the JCE Unlimited Strength policy jars so the JDK can use AES-256
# Kerberos encryption types (required on every node).
# wget -c http://download.oracle.com/otn-pub/java/jce/7/UnlimitedJCEPolicyJDK7.zip?AuthParam=1400207941_ee158c414c707a057960c521a7b29866
# unzip UnlimitedJCEPolicyJDK7.zip
# cp UnlimitedJCEPolicy/*.jar /usr/java/jdk1.7.0_65/jre/lib/security/
cp:是否覆盖"/usr/java/jdk1.7.0_51/jre/lib/security/local_policy.jar"? y
cp:是否覆盖"/usr/java/jdk1.7.0_51/jre/lib/security/US_export_policy.jar"? y
主机test3:
# yum -y install krb5\*
# /etc/krb5.conf — Kerberos client/library configuration for realm cc.cn
# (KDC and admin server both run on host test3).
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
default_realm = cc.cn
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 365d
renew_lifetime = 365d
forwardable = true
[realms]
cc.cn = {
kdc = test3
admin_server = test3
}
[kdc]
profile = /var/kerberos/krb5kdc/kdc.conf
# vim /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[realms]
cc.cn = {
#master_key_type = aes256-cts
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
# Grant full admin rights to any */admin principal, initialize the KDC
# database (-s writes a stash file so services start without a password),
# then start the KDC and admin services and create the first admin principal.
# vim /var/kerberos/krb5kdc/kadm5.acl
*/admin@cc.cn *
# kdb5_util create -r cc.cn -s
Enter KDC database master key:
# service krb5kdc start
# service kadmin start
# chkconfig krb5kdc on
# chkconfig kadmin on
# kadmin.local
kadmin.local: addprinc root/admin
Enter password for principal "root/admin@cc.cn":
主机test1:
# Install Kerberos, copy the realm config from the KDC host, then create
# host-specific service principals and export their keys to a local keytab.
# yum -y install krb5\*
# scp test3:/etc/krb5.conf /etc/
# kadmin -p root/admin
kadmin: addprinc -randkey root/test1
kadmin: addprinc -randkey HTTP/test1
kadmin: ktadd -k /hadoop/krb5.keytab root/test1 HTTP/test1
主机test2:
# Same procedure as test1: install Kerberos, copy the realm config from the
# KDC host, create this host's service principals, and export them to a keytab.
# yum -y install krb5\*
# scp test3:/etc/krb5.conf /etc/
# kadmin -p root/admin
kadmin: addprinc -randkey root/test2
kadmin: addprinc -randkey HTTP/test2
kadmin: ktadd -k /hadoop/krb5.keytab root/test2 HTTP/test2
主机test3:
# On the KDC host itself, kadmin.local can be used directly (no password needed).
# kadmin.local
kadmin.local: addprinc -randkey root/test3
kadmin.local: addprinc -randkey HTTP/test3
kadmin.local: ktadd -k /hadoop/krb5.keytab root/test3 HTTP/test3
主机test1:
# vim /usr/local/hadoop-2.4.0/etc/hadoop/core-site.xml
主机test1:
# vim /usr/local/hadoop-2.4.0/etc/hadoop/hdfs-site.xml
主机test1:
# vim /usr/local/hadoop-2.4.0/etc/hadoop/yarn-site.xml
主机test1:
# vim /usr/local/hadoop-2.4.0/etc/hadoop/mapred-site.xml
主机test1:
# Distribute the fully configured Hadoop tree to the other nodes.
# scp -r /usr/local/hadoop-2.4.0/ test2:/usr/local/
# scp -r /usr/local/hadoop-2.4.0/ test3:/usr/local/
主机test1:
# start-all.sh
主机test3:
# Obtain a Kerberos ticket from the keytab, then verify secure HDFS access.
# kinit -k -t /hadoop/krb5.keytab root/test3
# hdfs dfs -ls /
主机test1:
# vim /usr/local/hbase-0.98.1/conf/hbase-site.xml
主机test1:
# Copy the updated HBase config to the other nodes, then start the cluster
# and verify Kerberos-authenticated access from test3.
# scp /usr/local/hbase-0.98.1/conf/hbase-site.xml test2:/usr/local/hbase-0.98.1/conf/
# scp /usr/local/hbase-0.98.1/conf/hbase-site.xml test3:/usr/local/hbase-0.98.1/conf/
主机test1:
# start-hbase.sh
主机test3:
# kinit -k -t /hadoop/krb5.keytab root/test3
# hbase shell
/etc/xiaofeiyun.keytab
主机test1:
# Create application principals (data/xiaofeiyun, platform/xiaofeiyun), export
# both keys into a shared keytab, and copy that keytab to the other nodes so
# client applications can authenticate from any host.
# kadmin -p root/admin
Password for root/admin@cc.cn:
kadmin: addprinc -randkey data/xiaofeiyun
kadmin: addprinc -randkey platform/xiaofeiyun
kadmin: ktadd -k /etc/xiaofeiyun.keytab data/xiaofeiyun platform/xiaofeiyun
# scp /etc/xiaofeiyun.keytab test2:/etc/
# scp /etc/xiaofeiyun.keytab test3:/etc/
/etc/krb5.conf
// Client-side Hadoop Configuration for HA HDFS access to nameservice "cluster1".
// NOTE(review): `conf` is assumed to be an org.apache.hadoop.conf.Configuration
// created by surrounding code not shown here — confirm against the caller.
conf.set("fs.defaultFS","hdfs://cluster1");
conf.set("dfs.nameservices","cluster1");
// Two NameNodes (test1, test2), each serving RPC on port 9000.
conf.set("dfs.ha.namenodes.cluster1","test1,test2");
conf.set("dfs.namenode.rpc-address.cluster1.test1","test1:9000");
conf.set("dfs.namenode.rpc-address.cluster1.test2","test2:9000");
// Failover proxy provider that picks the active NameNode of the pair.
conf.set("dfs.client.failover.proxy.provider.cluster1","org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");